2024-11-13 18:30:24,822 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-13 18:30:24,838 main DEBUG Took 0.012730 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-13 18:30:24,838 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-13 18:30:24,839 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-13 18:30:24,840 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-13 18:30:24,841 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,849 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-13 18:30:24,861 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,863 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,864 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,865 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,865 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,866 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,867 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,867 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,868 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,868 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,869 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,870 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,871 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,871 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-13 18:30:24,872 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,872 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,873 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,873 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,874 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,874 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,875 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,875 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,876 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,876 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 18:30:24,877 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,877 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-13 18:30:24,879 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 18:30:24,881 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-13 18:30:24,883 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-13 18:30:24,884 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-13 18:30:24,885 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-13 18:30:24,886 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-13 18:30:24,897 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-13 18:30:24,900 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-13 18:30:24,902 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-13 18:30:24,902 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-13 18:30:24,903 main DEBUG createAppenders(={Console}) 2024-11-13 18:30:24,904 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-13 18:30:24,904 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-13 18:30:24,904 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-13 18:30:24,905 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-13 18:30:24,906 main DEBUG OutputStream closed 2024-11-13 18:30:24,906 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-13 18:30:24,906 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-13 18:30:24,907 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-13 18:30:24,988 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-13 18:30:24,990 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-13 18:30:24,992 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-13 18:30:24,993 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-13 18:30:24,994 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-13 18:30:24,994 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-13 18:30:24,995 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-13 18:30:24,995 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-13 18:30:24,996 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-13 18:30:24,996 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-13 18:30:24,997 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-13 18:30:24,997 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-13 18:30:24,998 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-13 18:30:24,998 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-13 18:30:24,998 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-13 18:30:24,999 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-13 18:30:24,999 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-13 18:30:25,000 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-13 18:30:25,002 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-13 18:30:25,003 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-13 18:30:25,003 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-13 18:30:25,004 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-13T18:30:25,324 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900 2024-11-13 18:30:25,328 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-13 18:30:25,328 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-13T18:30:25,341 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-13T18:30:25,384 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=266, ProcessCount=11, AvailableMemoryMB=3086 2024-11-13T18:30:25,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T18:30:25,405 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea, deleteOnExit=true 2024-11-13T18:30:25,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T18:30:25,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/test.cache.data in system properties and HBase conf 2024-11-13T18:30:25,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T18:30:25,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.log.dir in system properties and HBase conf 2024-11-13T18:30:25,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T18:30:25,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T18:30:25,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T18:30:25,517 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-13T18:30:25,639 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T18:30:25,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:30:25,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:30:25,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T18:30:25,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:30:25,647 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T18:30:25,648 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T18:30:25,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:30:25,649 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:30:25,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T18:30:25,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/nfs.dump.dir in system properties and HBase conf 2024-11-13T18:30:25,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/java.io.tmpdir in system properties and HBase conf 2024-11-13T18:30:25,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:30:25,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T18:30:25,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T18:30:26,164 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:30:26,509 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-13T18:30:26,586 INFO [Time-limited test {}] log.Log(170): Logging initialized @2636ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-13T18:30:26,664 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:30:26,733 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:30:26,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:30:26,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:30:26,759 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:30:26,771 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:30:26,773 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:30:26,774 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:30:26,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/java.io.tmpdir/jetty-localhost-33645-hadoop-hdfs-3_4_1-tests_jar-_-any-3898581624146153527/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:30:26,993 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:33645} 2024-11-13T18:30:26,993 INFO [Time-limited test {}] server.Server(415): Started @3044ms 2024-11-13T18:30:27,018 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:30:27,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:30:27,439 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:30:27,441 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:30:27,441 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:30:27,441 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:30:27,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:30:27,443 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:30:27,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/java.io.tmpdir/jetty-localhost-44279-hadoop-hdfs-3_4_1-tests_jar-_-any-8335214511634571274/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:30:27,568 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:44279} 2024-11-13T18:30:27,568 INFO [Time-limited test {}] server.Server(415): Started @3619ms 2024-11-13T18:30:27,629 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:30:27,765 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:30:27,775 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:30:27,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:30:27,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:30:27,778 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:30:27,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:30:27,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:30:27,938 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/java.io.tmpdir/jetty-localhost-46685-hadoop-hdfs-3_4_1-tests_jar-_-any-13842352140850974490/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:30:27,939 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:46685} 2024-11-13T18:30:27,939 INFO [Time-limited test {}] server.Server(415): Started @3990ms 2024-11-13T18:30:27,943 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T18:30:28,125 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/data/data2/current/BP-1387870917-172.17.0.3-1731522626275/current, will proceed with Du for space computation calculation, 2024-11-13T18:30:28,125 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/data/data3/current/BP-1387870917-172.17.0.3-1731522626275/current, will proceed with Du for space computation calculation, 2024-11-13T18:30:28,126 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/data/data1/current/BP-1387870917-172.17.0.3-1731522626275/current, will proceed with Du for space computation calculation, 2024-11-13T18:30:28,126 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/data/data4/current/BP-1387870917-172.17.0.3-1731522626275/current, will proceed with Du for space computation calculation, 2024-11-13T18:30:28,223 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:30:28,224 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:30:28,309 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f7d7c217f0ec7c0 with lease ID 0x41c056dd9908144c: Processing first storage report for DS-5ad0645e-839a-475e-b682-aec20d73957e from datanode DatanodeRegistration(127.0.0.1:33651, datanodeUuid=8473ec5d-c1ae-4353-a507-1450353ac727, infoPort=45685, infoSecurePort=0, ipcPort=45567, storageInfo=lv=-57;cid=testClusterID;nsid=1714166730;c=1731522626275) 2024-11-13T18:30:28,310 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f7d7c217f0ec7c0 with lease ID 0x41c056dd9908144c: from storage DS-5ad0645e-839a-475e-b682-aec20d73957e node DatanodeRegistration(127.0.0.1:33651, datanodeUuid=8473ec5d-c1ae-4353-a507-1450353ac727, infoPort=45685, infoSecurePort=0, ipcPort=45567, storageInfo=lv=-57;cid=testClusterID;nsid=1714166730;c=1731522626275), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-13T18:30:28,311 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaac72e23a46ad1e3 with lease ID 0x41c056dd9908144b: Processing first storage report for DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3 from datanode DatanodeRegistration(127.0.0.1:34725, datanodeUuid=c852c6d5-f0df-4483-8b96-bc1ded6e0976, infoPort=41903, infoSecurePort=0, ipcPort=33855, storageInfo=lv=-57;cid=testClusterID;nsid=1714166730;c=1731522626275) 2024-11-13T18:30:28,311 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaac72e23a46ad1e3 with lease ID 0x41c056dd9908144b: from storage DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3 node DatanodeRegistration(127.0.0.1:34725, datanodeUuid=c852c6d5-f0df-4483-8b96-bc1ded6e0976, infoPort=41903, infoSecurePort=0, ipcPort=33855, storageInfo=lv=-57;cid=testClusterID;nsid=1714166730;c=1731522626275), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:30:28,311 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f7d7c217f0ec7c0 with lease ID 0x41c056dd9908144c: Processing first storage report for DS-4a013cf4-296c-4feb-ab6a-710babc9d299 from datanode DatanodeRegistration(127.0.0.1:33651, datanodeUuid=8473ec5d-c1ae-4353-a507-1450353ac727, infoPort=45685, infoSecurePort=0, ipcPort=45567, storageInfo=lv=-57;cid=testClusterID;nsid=1714166730;c=1731522626275) 2024-11-13T18:30:28,312 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f7d7c217f0ec7c0 with lease ID 0x41c056dd9908144c: from storage DS-4a013cf4-296c-4feb-ab6a-710babc9d299 node DatanodeRegistration(127.0.0.1:33651, datanodeUuid=8473ec5d-c1ae-4353-a507-1450353ac727, infoPort=45685, infoSecurePort=0, ipcPort=45567, storageInfo=lv=-57;cid=testClusterID;nsid=1714166730;c=1731522626275), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:30:28,316 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaac72e23a46ad1e3 with lease ID 0x41c056dd9908144b: Processing first storage report for DS-406d013c-7de3-4685-b2c2-280814431ec3 from datanode DatanodeRegistration(127.0.0.1:34725, datanodeUuid=c852c6d5-f0df-4483-8b96-bc1ded6e0976, infoPort=41903, infoSecurePort=0, ipcPort=33855, storageInfo=lv=-57;cid=testClusterID;nsid=1714166730;c=1731522626275) 2024-11-13T18:30:28,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xaac72e23a46ad1e3 with lease ID 0x41c056dd9908144b: from storage DS-406d013c-7de3-4685-b2c2-280814431ec3 node DatanodeRegistration(127.0.0.1:34725, datanodeUuid=c852c6d5-f0df-4483-8b96-bc1ded6e0976, infoPort=41903, infoSecurePort=0, ipcPort=33855, storageInfo=lv=-57;cid=testClusterID;nsid=1714166730;c=1731522626275), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:30:28,384 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900 2024-11-13T18:30:28,463 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/zookeeper_0, clientPort=63520, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T18:30:28,473 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63520 2024-11-13T18:30:28,487 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:30:28,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:30:28,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:30:28,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:30:29,194 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5 with version=8 2024-11-13T18:30:29,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/hbase-staging 2024-11-13T18:30:29,301 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-13T18:30:29,579 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:30:29,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:30:29,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:30:29,596 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:30:29,596 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:30:29,596 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:30:29,736 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T18:30:29,797 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-13T18:30:29,806 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-13T18:30:29,810 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:30:29,837 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 6231 (auto-detected) 2024-11-13T18:30:29,838 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-13T18:30:29,857 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40497 2024-11-13T18:30:29,878 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40497 connecting to ZooKeeper ensemble=127.0.0.1:63520 2024-11-13T18:30:29,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404970x0, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:30:29,914 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40497-0x100ed5d80070000 connected 2024-11-13T18:30:29,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:30:29,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:30:29,960 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:30:29,964 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5, hbase.cluster.distributed=false 2024-11-13T18:30:29,991 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:30:29,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40497 2024-11-13T18:30:29,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40497 2024-11-13T18:30:29,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40497 2024-11-13T18:30:29,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40497 2024-11-13T18:30:29,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40497 2024-11-13T18:30:30,128 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:30:30,131 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:30:30,131 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:30:30,131 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:30:30,132 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:30:30,132 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:30:30,136 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T18:30:30,139 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:30:30,141 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44965 2024-11-13T18:30:30,144 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44965 connecting to ZooKeeper ensemble=127.0.0.1:63520 2024-11-13T18:30:30,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:30:30,152 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:30:30,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:449650x0, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:30:30,164 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44965-0x100ed5d80070001 connected 2024-11-13T18:30:30,165 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:30:30,172 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T18:30:30,182 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T18:30:30,184 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T18:30:30,190 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:30:30,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44965 2024-11-13T18:30:30,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44965 2024-11-13T18:30:30,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44965 2024-11-13T18:30:30,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44965 2024-11-13T18:30:30,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44965 2024-11-13T18:30:30,214 DEBUG [M:0;39e84130bbc9:40497 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39e84130bbc9:40497 2024-11-13T18:30:30,215 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39e84130bbc9,40497,1731522629354 2024-11-13T18:30:30,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:30:30,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:30:30,227 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39e84130bbc9,40497,1731522629354 2024-11-13T18:30:30,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T18:30:30,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:30,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-13T18:30:30,258 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T18:30:30,260 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39e84130bbc9,40497,1731522629354 from backup master directory 2024-11-13T18:30:30,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:30:30,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39e84130bbc9,40497,1731522629354 2024-11-13T18:30:30,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:30:30,264 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:30:30,265 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39e84130bbc9,40497,1731522629354 2024-11-13T18:30:30,267 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-13T18:30:30,268 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-13T18:30:30,343 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/hbase.id] with ID: 89d37843-98b3-454c-a573-08e3ca5d9442 2024-11-13T18:30:30,344 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/.tmp/hbase.id 2024-11-13T18:30:30,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:30:30,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:30:30,360 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/.tmp/hbase.id]:[hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/hbase.id] 2024-11-13T18:30:30,419 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:30:30,425 INFO 
[master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T18:30:30,448 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 21ms. 2024-11-13T18:30:30,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:30,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:30,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:30:30,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:30:30,508 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:30:30,510 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T18:30:30,516 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:30:30,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:30:30,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:30:30,586 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store 2024-11-13T18:30:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:30:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:30:30,611 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-13T18:30:30,614 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:30:30,615 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:30:30,615 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:30:30,615 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:30:30,617 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:30:30,617 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:30:30,617 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:30:30,618 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522630615Disabling compacts and flushes for region at 1731522630615Disabling writes for close at 1731522630617 (+2 ms)Writing region close event to WAL at 1731522630617Closed at 1731522630617 2024-11-13T18:30:30,620 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/.initializing 2024-11-13T18:30:30,620 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/WALs/39e84130bbc9,40497,1731522629354 2024-11-13T18:30:30,645 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C40497%2C1731522629354, suffix=, logDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/WALs/39e84130bbc9,40497,1731522629354, archiveDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/oldWALs, maxLogs=10 2024-11-13T18:30:30,657 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40497%2C1731522629354.1731522630651 2024-11-13T18:30:30,682 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/WALs/39e84130bbc9,40497,1731522629354/39e84130bbc9%2C40497%2C1731522629354.1731522630651 2024-11-13T18:30:30,690 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:30:30,691 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:30:30,692 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:30:30,695 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,696 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,740 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T18:30:30,774 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:30,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:30:30,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T18:30:30,782 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:30,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:30:30,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T18:30:30,787 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:30,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:30:30,789 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T18:30:30,792 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:30,792 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:30:30,793 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,796 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,797 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,803 DEBUG [master/39e84130bbc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,803 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,807 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T18:30:30,810 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:30:30,815 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:30:30,816 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863907, jitterRate=0.09851506352424622}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T18:30:30,823 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731522630714Initializing all the Stores at 1731522630717 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522630717Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522630718 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522630718Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522630718Cleaning up temporary data from old regions at 1731522630804 (+86 ms)Region opened successfully at 1731522630823 (+19 ms) 2024-11-13T18:30:30,824 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T18:30:30,861 DEBUG [master/39e84130bbc9:0:becomeActiveMaster 
{}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@496d6ea5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:30:30,895 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T18:30:30,911 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T18:30:30,911 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T18:30:30,915 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T18:30:30,916 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-13T18:30:30,921 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-13T18:30:30,921 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T18:30:30,948 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T18:30:30,957 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T18:30:30,959 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T18:30:30,962 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T18:30:30,963 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T18:30:30,965 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T18:30:30,967 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T18:30:30,970 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T18:30:30,972 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T18:30:30,973 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, 
baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T18:30:30,975 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T18:30:30,993 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T18:30:30,995 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T18:30:30,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:30:30,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:30:30,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:30,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:31,001 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39e84130bbc9,40497,1731522629354, sessionid=0x100ed5d80070000, setting cluster-up flag (Was=false) 2024-11-13T18:30:31,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:31,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:31,021 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T18:30:31,022 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,40497,1731522629354 2024-11-13T18:30:31,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:31,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:31,035 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, 
/hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T18:30:31,036 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,40497,1731522629354 2024-11-13T18:30:31,042 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T18:30:31,098 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(746): ClusterId : 89d37843-98b3-454c-a573-08e3ca5d9442 2024-11-13T18:30:31,101 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T18:30:31,106 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T18:30:31,106 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T18:30:31,109 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T18:30:31,109 DEBUG [RS:0;39e84130bbc9:44965 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a86495e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:30:31,118 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T18:30:31,124 DEBUG [RS:0;39e84130bbc9:44965 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39e84130bbc9:44965 2024-11-13T18:30:31,127 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T18:30:31,127 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T18:30:31,127 DEBUG [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T18:30:31,129 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T18:30:31,130 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(2659): reportForDuty to master=39e84130bbc9,40497,1731522629354 with port=44965, startcode=1731522630074 2024-11-13T18:30:31,137 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
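Note: the 'Loaded config' line from StochasticLoadBalancer above echoes tunables read from configuration. A rough sketch of the corresponding keys, assuming the documented hbase.master.balancer.stochastic.* names; the values simply mirror what the startup log reports:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerConfigSketch {
  public static Configuration tune() {
    Configuration conf = HBaseConfiguration.create();
    // Mirrors maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800,
    // maxRunningTime=30000, isByTable=false from the log.
    conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1000000L);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setLong("hbase.master.balancer.stochastic.stepsPerRegion", 800L);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000L);
    conf.setBoolean("hbase.master.loadbalance.bytable", false);
    return conf;
  }
}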
2024-11-13T18:30:31,144 DEBUG [RS:0;39e84130bbc9:44965 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T18:30:31,143 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39e84130bbc9,40497,1731522629354 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T18:30:31,150 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:30:31,150 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:30:31,150 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:30:31,150 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:30:31,150 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39e84130bbc9:0, corePoolSize=10, maxPoolSize=10 2024-11-13T18:30:31,151 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,151 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:30:31,151 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,153 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731522661153 2024-11-13T18:30:31,155 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T18:30:31,156 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T18:30:31,156 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:30:31,157 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T18:30:31,160 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T18:30:31,160 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T18:30:31,160 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T18:30:31,160 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T18:30:31,161 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,164 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T18:30:31,164 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:31,164 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T18:30:31,165 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T18:30:31,165 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T18:30:31,169 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T18:30:31,170 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T18:30:31,172 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522631171,5,FailOnTimeoutGroup] 
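Note: the cleaner classes initialized above are pluggable delegates; the master builds the LogCleaner and HFileCleaner chains from comma-separated class lists. A minimal sketch, assuming the standard hbase.master.logcleaner.plugins / hbase.master.hfilecleaner.plugins keys and using a subset of the classes named in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerChainSketch {
  public static Configuration cleaners() {
    Configuration conf = HBaseConfiguration.create();
    // Delegates consulted before an old WAL in oldWALs may be deleted.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
      + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    // Delegates consulted before an archived HFile may be deleted.
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
      + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
    return conf;
  }
}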
2024-11-13T18:30:31,173 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522631172,5,FailOnTimeoutGroup] 2024-11-13T18:30:31,173 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,174 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T18:30:31,175 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,175 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:30:31,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:30:31,186 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T18:30:31,186 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5 2024-11-13T18:30:31,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34725 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:30:31,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:30:31,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:30:31,207 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:30:31,210 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:30:31,210 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:31,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:30:31,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:30:31,213 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:30:31,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:31,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:30:31,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:30:31,216 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:30:31,216 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:31,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:30:31,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:30:31,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:30:31,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:31,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:30:31,221 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:30:31,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740 2024-11-13T18:30:31,223 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740 2024-11-13T18:30:31,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:30:31,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:30:31,227 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:30:31,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:30:31,233 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:30:31,234 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=719613, jitterRate=-0.08496582508087158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:30:31,235 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39345, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T18:30:31,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731522631204Initializing all the Stores at 1731522631206 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522631206Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522631207 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522631207Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522631207Cleaning up temporary data from old regions at 1731522631226 (+19 ms)Region opened successfully at 1731522631237 (+11 ms) 2024-11-13T18:30:31,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & 
flushes 2024-11-13T18:30:31,241 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:30:31,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:30:31,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:30:31,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:30:31,244 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40497 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39e84130bbc9,44965,1731522630074 2024-11-13T18:30:31,244 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:30:31,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522631241Disabling compacts and flushes for region at 1731522631241Disabling writes for close at 1731522631242 (+1 ms)Writing region close event to WAL at 1731522631243 (+1 ms)Closed at 1731522631244 (+1 ms) 2024-11-13T18:30:31,247 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40497 {}] master.ServerManager(517): Registering regionserver=39e84130bbc9,44965,1731522630074 2024-11-13T18:30:31,247 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:30:31,248 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T18:30:31,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T18:30:31,264 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:30:31,266 DEBUG [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5 2024-11-13T18:30:31,267 DEBUG [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39907 2024-11-13T18:30:31,267 DEBUG [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T18:30:31,267 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T18:30:31,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:30:31,272 DEBUG [RS:0;39e84130bbc9:44965 {}] zookeeper.ZKUtil(111): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39e84130bbc9,44965,1731522630074 2024-11-13T18:30:31,272 WARN [RS:0;39e84130bbc9:44965 {}] 
hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:30:31,273 INFO [RS:0;39e84130bbc9:44965 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:30:31,273 DEBUG [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074 2024-11-13T18:30:31,275 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39e84130bbc9,44965,1731522630074] 2024-11-13T18:30:31,307 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T18:30:31,327 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T18:30:31,333 INFO [RS:0;39e84130bbc9:44965 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T18:30:31,334 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,335 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T18:30:31,341 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T18:30:31,343 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
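Note: the globalMemStoreLimit=880 M / globalMemStoreLimitLowMark=836 M figures above follow from simple heap-fraction arithmetic: the limit is a fraction of the JVM max heap and the low mark is a fraction of that limit (836 ≈ 0.95 × 880, consistent with a heap of roughly 2.2 GB). A small sketch of the calculation, assuming the standard hbase.regionserver.global.memstore.size keys:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of heap shared by all memstores (default 0.4) and the
    // low-water mark expressed as a fraction of that limit (default 0.95).
    double globalFraction = conf.getDouble("hbase.regionserver.global.memstore.size", 0.4d);
    double lowerFraction =
        conf.getDouble("hbase.regionserver.global.memstore.size.lower.limit", 0.95d);

    long maxHeap = Runtime.getRuntime().maxMemory();
    long globalLimit = (long) (maxHeap * globalFraction);
    long lowMark = (long) (globalLimit * lowerFraction);

    System.out.printf("globalMemStoreLimit=%d M, lowMark=%d M%n",
        globalLimit >> 20, lowMark >> 20);
  }
}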
2024-11-13T18:30:31,343 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,343 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,344 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,344 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,344 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,344 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:30:31,344 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,344 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,345 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,345 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,345 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,345 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:30:31,345 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:30:31,346 DEBUG [RS:0;39e84130bbc9:44965 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:30:31,347 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,347 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,347 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,347 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
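Note: every 'Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled.' line corresponds to a periodic task registered with the server's ChoreService. A rough sketch of that mechanism using a hypothetical chore name (DemoChore), assuming the public ScheduledChore/ChoreService API:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void register(Stoppable stopper) {
    // Hypothetical chore; real examples in the log are CompactionChecker,
    // MemstoreFlusherChore and nonceCleaner.
    ScheduledChore demo = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override
      protected void chore() {
        // periodic work, invoked every 1000 ms until the stopper is stopped
      }
    };
    ChoreService service = new ChoreService("demo-chore-service");
    // Scheduling is what produces the "... is enabled." log lines seen above.
    service.scheduleChore(demo);
  }
}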
2024-11-13T18:30:31,347 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,347 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,44965,1731522630074-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:30:31,368 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T18:30:31,370 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,44965,1731522630074-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,370 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,370 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.Replication(171): 39e84130bbc9,44965,1731522630074 started 2024-11-13T18:30:31,388 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:31,389 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1482): Serving as 39e84130bbc9,44965,1731522630074, RpcServer on 39e84130bbc9/172.17.0.3:44965, sessionid=0x100ed5d80070001 2024-11-13T18:30:31,389 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T18:30:31,390 DEBUG [RS:0;39e84130bbc9:44965 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39e84130bbc9,44965,1731522630074 2024-11-13T18:30:31,390 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,44965,1731522630074' 2024-11-13T18:30:31,390 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T18:30:31,391 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T18:30:31,392 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T18:30:31,392 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T18:30:31,392 DEBUG [RS:0;39e84130bbc9:44965 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39e84130bbc9,44965,1731522630074 2024-11-13T18:30:31,392 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,44965,1731522630074' 2024-11-13T18:30:31,392 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T18:30:31,393 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T18:30:31,393 DEBUG [RS:0;39e84130bbc9:44965 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T18:30:31,394 INFO [RS:0;39e84130bbc9:44965 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T18:30:31,394 INFO [RS:0;39e84130bbc9:44965 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-13T18:30:31,418 WARN [39e84130bbc9:40497 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T18:30:31,503 INFO [RS:0;39e84130bbc9:44965 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C44965%2C1731522630074, suffix=, logDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074, archiveDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs, maxLogs=32 2024-11-13T18:30:31,506 INFO [RS:0;39e84130bbc9:44965 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.1731522631506 2024-11-13T18:30:31,519 INFO [RS:0;39e84130bbc9:44965 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522631506 2024-11-13T18:30:31,522 DEBUG [RS:0;39e84130bbc9:44965 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:30:31,671 DEBUG [39e84130bbc9:40497 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T18:30:31,683 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39e84130bbc9,44965,1731522630074 2024-11-13T18:30:31,690 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,44965,1731522630074, state=OPENING 2024-11-13T18:30:31,695 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T18:30:31,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:31,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:30:31,700 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:30:31,700 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:30:31,701 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:30:31,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,44965,1731522630074}] 2024-11-13T18:30:31,877 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T18:30:31,881 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51929, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T18:30:31,894 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T18:30:31,895 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:30:31,898 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C44965%2C1731522630074.meta, suffix=.meta, logDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074, archiveDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs, maxLogs=32 2024-11-13T18:30:31,900 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.meta.1731522631900.meta 2024-11-13T18:30:31,908 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.meta.1731522631900.meta 2024-11-13T18:30:31,911 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:30:31,912 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:30:31,914 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T18:30:31,917 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T18:30:31,922 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T18:30:31,927 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T18:30:31,927 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:30:31,927 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T18:30:31,927 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T18:30:31,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:30:31,932 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:30:31,932 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:31,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:30:31,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:30:31,935 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:30:31,935 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:31,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:30:31,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:30:31,938 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:30:31,938 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:31,939 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:30:31,939 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:30:31,940 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:30:31,940 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:31,941 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T18:30:31,941 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:30:31,942 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740 2024-11-13T18:30:31,945 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740 2024-11-13T18:30:31,948 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:30:31,948 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:30:31,949 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:30:31,951 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:30:31,953 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740114, jitterRate=-0.058896616101264954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:30:31,954 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T18:30:31,955 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731522631928Writing region info on filesystem at 1731522631928Initializing all the Stores at 1731522631930 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522631930Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522631930Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522631930Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522631930Cleaning up temporary data from old regions at 1731522631948 (+18 ms)Running coprocessor post-open hooks at 1731522631954 (+6 ms)Region opened successfully at 1731522631955 (+1 ms) 2024-11-13T18:30:31,962 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731522631868 2024-11-13T18:30:31,975 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T18:30:31,976 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T18:30:31,978 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,44965,1731522630074 2024-11-13T18:30:31,980 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,44965,1731522630074, state=OPEN 2024-11-13T18:30:31,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:30:31,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:30:31,988 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:30:31,988 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:30:31,988 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39e84130bbc9,44965,1731522630074 2024-11-13T18:30:31,994 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T18:30:31,995 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,44965,1731522630074 in 286 msec 2024-11-13T18:30:32,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T18:30:32,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 742 msec 2024-11-13T18:30:32,003 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:30:32,003 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T18:30:32,026 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:30:32,027 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,44965,1731522630074, seqNum=-1] 2024-11-13T18:30:32,053 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:30:32,055 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52591, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:30:32,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0140 sec 2024-11-13T18:30:32,087 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731522632087, completionTime=-1 2024-11-13T18:30:32,090 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T18:30:32,090 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T18:30:32,123 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T18:30:32,123 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731522692123 2024-11-13T18:30:32,123 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731522752123 2024-11-13T18:30:32,123 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 32 msec 2024-11-13T18:30:32,126 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40497,1731522629354-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:32,127 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40497,1731522629354-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:32,127 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40497,1731522629354-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:32,129 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39e84130bbc9:40497, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T18:30:32,129 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:32,130 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T18:30:32,138 DEBUG [master/39e84130bbc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T18:30:32,161 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.896sec 2024-11-13T18:30:32,163 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T18:30:32,164 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T18:30:32,166 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T18:30:32,166 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T18:30:32,167 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T18:30:32,168 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40497,1731522629354-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:30:32,168 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40497,1731522629354-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T18:30:32,178 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T18:30:32,179 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T18:30:32,180 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40497,1731522629354-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T18:30:32,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b598c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:30:32,215 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-13T18:30:32,215 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-13T18:30:32,220 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39e84130bbc9,40497,-1 for getting cluster id 2024-11-13T18:30:32,224 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T18:30:32,233 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '89d37843-98b3-454c-a573-08e3ca5d9442' 2024-11-13T18:30:32,236 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T18:30:32,236 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "89d37843-98b3-454c-a573-08e3ca5d9442" 2024-11-13T18:30:32,237 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e75f358, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:30:32,237 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39e84130bbc9,40497,-1] 2024-11-13T18:30:32,240 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T18:30:32,242 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:30:32,244 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35290, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T18:30:32,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e15a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:30:32,248 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:30:32,257 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,44965,1731522630074, seqNum=-1] 2024-11-13T18:30:32,258 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:30:32,261 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60756, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:30:32,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=39e84130bbc9,40497,1731522629354 2024-11-13T18:30:32,311 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:30:32,321 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T18:30:32,326 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T18:30:32,336 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 39e84130bbc9,40497,1731522629354 2024-11-13T18:30:32,340 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@227d5dfb 2024-11-13T18:30:32,341 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T18:30:32,344 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35300, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T18:30:32,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40497 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T18:30:32,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40497 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
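The two TableDescriptorChecker warnings above show that the test deliberately shrinks "hbase.hregion.max.filesize" (786432 bytes) and "hbase.hregion.memstore.flush.size" (8192 bytes) so flushes and WAL rolls happen quickly. A minimal, hypothetical Java sketch of how a test harness might apply those values before starting a mini cluster follows; only the two property keys and values are taken from the log, while the class name and use of HBaseTestingUtil are illustrative assumptions.

    // Hypothetical setup sketch (not from the log); property keys/values come from the WARN lines above.
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class SlowSyncLogRollingSetup {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Tiny max region size so splitting pressure appears quickly (786432 bytes, per the warning).
        util.getConfiguration().setLong("hbase.hregion.max.filesize", 786432L);
        // Tiny memstore flush size so flushes are frequent (8192 bytes, per the warning).
        util.getConfiguration().setLong("hbase.hregion.memstore.flush.size", 8192L);
        util.startMiniCluster();        // defaults to one master and one region server, as in this log
        // ... run the test against util.getConnection() ...
        util.shutdownMiniCluster();
      }
    }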
2024-11-13T18:30:32,350 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40497 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:30:32,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40497 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-13T18:30:32,361 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T18:30:32,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40497 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-13T18:30:32,363 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:32,366 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T18:30:32,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40497 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T18:30:32,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741835_1011 (size=389) 2024-11-13T18:30:32,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741835_1011 (size=389) 2024-11-13T18:30:32,401 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1ccbdc51916553952674527060ff7b7a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5 2024-11-13T18:30:32,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741836_1012 (size=72) 2024-11-13T18:30:32,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741836_1012 (size=72) 2024-11-13T18:30:32,412 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:30:32,412 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1ccbdc51916553952674527060ff7b7a, disabling compactions & flushes 2024-11-13T18:30:32,412 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:30:32,412 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:30:32,412 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. after waiting 0 ms 2024-11-13T18:30:32,412 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:30:32,412 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:30:32,412 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1ccbdc51916553952674527060ff7b7a: Waiting for close lock at 1731522632412Disabling compacts and flushes for region at 1731522632412Disabling writes for close at 1731522632412Writing region close event to WAL at 1731522632412Closed at 1731522632412 2024-11-13T18:30:32,414 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T18:30:32,419 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731522632415"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731522632415"}]},"ts":"1731522632415"} 2024-11-13T18:30:32,424 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-13T18:30:32,426 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T18:30:32,430 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522632426"}]},"ts":"1731522632426"} 2024-11-13T18:30:32,436 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-13T18:30:32,439 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ccbdc51916553952674527060ff7b7a, ASSIGN}] 2024-11-13T18:30:32,441 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ccbdc51916553952674527060ff7b7a, ASSIGN 2024-11-13T18:30:32,443 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ccbdc51916553952674527060ff7b7a, ASSIGN; state=OFFLINE, location=39e84130bbc9,44965,1731522630074; forceNewPlan=false, retain=false 2024-11-13T18:30:32,595 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1ccbdc51916553952674527060ff7b7a, regionState=OPENING, regionLocation=39e84130bbc9,44965,1731522630074 2024-11-13T18:30:32,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ccbdc51916553952674527060ff7b7a, ASSIGN because future has completed 2024-11-13T18:30:32,601 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ccbdc51916553952674527060ff7b7a, server=39e84130bbc9,44965,1731522630074}] 2024-11-13T18:30:32,762 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 
2024-11-13T18:30:32,762 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1ccbdc51916553952674527060ff7b7a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:30:32,763 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,763 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:30:32,763 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,764 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,766 INFO [StoreOpener-1ccbdc51916553952674527060ff7b7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,769 INFO [StoreOpener-1ccbdc51916553952674527060ff7b7a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ccbdc51916553952674527060ff7b7a columnFamilyName info 2024-11-13T18:30:32,769 DEBUG [StoreOpener-1ccbdc51916553952674527060ff7b7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:30:32,770 INFO [StoreOpener-1ccbdc51916553952674527060ff7b7a-1 {}] regionserver.HStore(327): Store=1ccbdc51916553952674527060ff7b7a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:30:32,770 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,772 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,772 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,773 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,773 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,776 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,779 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:30:32,780 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1ccbdc51916553952674527060ff7b7a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777130, jitterRate=-0.01182885468006134}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T18:30:32,780 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:32,781 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1ccbdc51916553952674527060ff7b7a: Running coprocessor pre-open hook at 1731522632764Writing region info on filesystem at 1731522632764Initializing all the Stores at 1731522632765 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522632766 (+1 ms)Cleaning up temporary data from old regions at 1731522632773 (+7 ms)Running coprocessor post-open hooks at 1731522632780 (+7 ms)Region opened successfully at 1731522632781 (+1 ms) 2024-11-13T18:30:32,783 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a., pid=6, masterSystemTime=1731522632755 2024-11-13T18:30:32,788 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:30:32,788 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:30:32,789 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1ccbdc51916553952674527060ff7b7a, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,44965,1731522630074 2024-11-13T18:30:32,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ccbdc51916553952674527060ff7b7a, server=39e84130bbc9,44965,1731522630074 because future has completed 2024-11-13T18:30:32,799 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T18:30:32,799 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1ccbdc51916553952674527060ff7b7a, server=39e84130bbc9,44965,1731522630074 in 195 msec 2024-11-13T18:30:32,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T18:30:32,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1ccbdc51916553952674527060ff7b7a, ASSIGN in 360 msec 2024-11-13T18:30:32,805 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T18:30:32,806 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522632805"}]},"ts":"1731522632805"} 2024-11-13T18:30:32,809 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-13T18:30:32,810 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T18:30:32,813 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 457 msec 2024-11-13T18:30:37,431 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-13T18:30:37,491 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T18:30:37,493 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-13T18:30:39,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T18:30:39,795 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T18:30:39,797 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-13T18:30:39,797 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T18:30:39,799 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:30:39,799 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T18:30:39,799 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T18:30:39,799 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T18:30:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40497 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T18:30:42,424 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-13T18:30:42,428 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-13T18:30:42,436 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-13T18:30:42,437 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 
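The CREATE operation that just completed (pid=4) built 'TestLogRolling-testSlowSyncLogRolling' with a single 'info' family, VERSIONS => '1' and BLOOMFILTER => 'ROW', per the request logged at 18:30:32,350. A hedged sketch of the equivalent client-side call is below; the helper method and the assumption of an existing Connection are illustrative, while the table name, family name, and descriptor settings come from that log entry.

    // Illustrative fragment only; reconstructs the descriptor recorded by HMaster$4(2454).
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    static void createTestTable(Connection conn) throws java.io.IOException {
      try (Admin admin = conn.getAdmin()) {
        admin.createTable(
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)                  // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                    .build())
                .build());
      }
    }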
2024-11-13T18:30:42,438 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.1731522642438 2024-11-13T18:30:42,448 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:30:42,448 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:30:42,449 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:30:42,449 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:30:42,449 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:30:42,450 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522631506 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522642438 2024-11-13T18:30:42,451 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:30:42,451 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522631506 is not closed yet, will try archiving it next time 2024-11-13T18:30:42,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741833_1009 (size=451) 2024-11-13T18:30:42,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741833_1009 (size=451) 2024-11-13T18:30:42,456 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522631506 to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs/39e84130bbc9%2C44965%2C1731522630074.1731522631506 2024-11-13T18:30:42,462 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a., hostname=39e84130bbc9,44965,1731522630074, seqNum=2] 2024-11-13T18:30:54,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44965 {}] regionserver.HRegion(8855): Flush requested on 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:30:54,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1ccbdc51916553952674527060ff7b7a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:30:54,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/c5267229e16745ea94969314043b5a19 is 1080, key is row0001/info:/1731522642464/Put/seqid=0 2024-11-13T18:30:54,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741838_1014 (size=12509) 2024-11-13T18:30:54,607 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741838_1014 (size=12509) 2024-11-13T18:30:55,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/c5267229e16745ea94969314043b5a19 2024-11-13T18:30:55,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/c5267229e16745ea94969314043b5a19 as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c5267229e16745ea94969314043b5a19 2024-11-13T18:30:55,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c5267229e16745ea94969314043b5a19, entries=7, sequenceid=11, filesize=12.2 K 2024-11-13T18:30:55,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1ccbdc51916553952674527060ff7b7a in 621ms, sequenceid=11, compaction requested=false 2024-11-13T18:30:55,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1ccbdc51916553952674527060ff7b7a: 2024-11-13T18:30:58,381 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
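The flush traced above (dataSize=7.36 KB, entries=7, sequenceid=11) is driven by seven puts of roughly 1 KB each to the info family. The sketch below shows how such writes look with the HBase client API; it is a hypothetical helper, not the test's code, and the explicit Admin.flush is only for illustration (in the test the flush is requested by the memstore size check, as the "Flush requested on ..." line shows).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteBatch {
  // Hypothetical helper: write N ~1 KB rows, then flush, roughly matching the
  // "Flushing ... dataSize=7.36 KB" / "entries=7" lines above.
  static void writeAndFlush(Connection conn, int rows) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    byte[] value = new byte[1024];  // ~1 KB per cell, like the 1080-byte cells reported by HFileWriterImpl
    try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
      for (int i = 1; i <= rows; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), value); // empty qualifier, as in row0001/info:/
        table.put(put);
      }
      admin.flush(tn); // illustrative; the test relies on the flush size threshold instead
    }
  }
}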
2024-11-13T18:31:02,537 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.1731522662536 2024-11-13T18:31:02,754 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 214 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:02,754 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:02,755 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:02,756 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:02,756 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:02,756 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:02,756 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522642438 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522662536 2024-11-13T18:31:02,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741837_1013 (size=12399) 2024-11-13T18:31:02,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741837_1013 (size=12399) 2024-11-13T18:31:02,769 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:31:02,770 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522642438 is not closed yet, will try archiving it next time 2024-11-13T18:31:02,974 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:05,178 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:07,383 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:09,588 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:09,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44965 {}] regionserver.HRegion(8855): Flush requested on 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:31:09,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1ccbdc51916553952674527060ff7b7a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:31:09,790 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:09,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/c6bf19aaca3042adb60380ae3877ac07 is 1080, key is row0008/info:/1731522656499/Put/seqid=0 2024-11-13T18:31:09,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741840_1016 (size=12509) 2024-11-13T18:31:09,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741840_1016 (size=12509) 2024-11-13T18:31:09,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/c6bf19aaca3042adb60380ae3877ac07 2024-11-13T18:31:09,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/c6bf19aaca3042adb60380ae3877ac07 as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c6bf19aaca3042adb60380ae3877ac07 2024-11-13T18:31:09,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c6bf19aaca3042adb60380ae3877ac07, entries=7, sequenceid=21, filesize=12.2 K 2024-11-13T18:31:10,031 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:10,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1ccbdc51916553952674527060ff7b7a in 
443ms, sequenceid=21, compaction requested=false 2024-11-13T18:31:10,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1ccbdc51916553952674527060ff7b7a: 2024-11-13T18:31:10,032 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-13T18:31:10,032 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:31:10,033 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c5267229e16745ea94969314043b5a19 because midkey is the same as first or last row 2024-11-13T18:31:11,796 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:13,143 INFO [master/39e84130bbc9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T18:31:13,144 INFO [master/39e84130bbc9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T18:31:14,008 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:14,011 WARN [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:14,013 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C44965%2C1731522630074:(num 1731522662536) roll requested 2024-11-13T18:31:14,014 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.1731522674013 2024-11-13T18:31:14,225 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:14,225 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:14,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:14,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:14,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:14,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
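The WARN above ("Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5") comes from the WAL's slow-sync accounting: each sync that takes longer than a configured time is counted as slow, and once the count passes a second threshold a roll of the WAL is requested. The following is a deliberately simplified, illustrative tracker of that behavior; the field names and defaults are assumptions, not HBase's actual AbstractFSWAL/FSHLog code.

// Illustrative only: a simplified slow-sync tracker, not HBase's implementation.
final class SlowSyncTracker {
  private final long slowSyncNs;    // a sync slower than this counts as "slow" (assumed config)
  private final int rollThreshold;  // request a roll after this many slow syncs (the log shows threshold=5)
  private int slowSyncCount;

  SlowSyncTracker(long slowSyncNs, int rollThreshold) {
    this.slowSyncNs = slowSyncNs;
    this.rollThreshold = rollThreshold;
  }

  /** Returns true if the caller should request a log roll. */
  boolean onSyncCompleted(long syncDurationNs) {
    if (syncDurationNs > slowSyncNs) {
      slowSyncCount++;
    }
    if (slowSyncCount > rollThreshold) {
      slowSyncCount = 0;  // reset once a roll has been requested
      return true;        // e.g. "count=8, threshold=5" in the WARN above
    }
    return false;
  }
}

A second, independent trigger is also visible later in the log: a single sync that exceeds a time threshold on its own ("time=5006 ms, threshold=5000 ms") requests a roll immediately.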
2024-11-13T18:31:14,226 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522662536 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522674013 2024-11-13T18:31:14,228 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:31:14,228 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522662536 is not closed yet, will try archiving it next time 2024-11-13T18:31:14,228 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522642438 to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs/39e84130bbc9%2C44965%2C1731522630074.1731522642438 2024-11-13T18:31:14,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741839_1015 (size=7739) 2024-11-13T18:31:14,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741839_1015 (size=7739) 2024-11-13T18:31:16,214 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:17,763 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1ccbdc51916553952674527060ff7b7a, had cached 0 bytes from a total of 25018 2024-11-13T18:31:18,418 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:20,623 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:22,827 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], 
DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:24,830 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T18:31:24,831 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.1731522684830 2024-11-13T18:31:28,381 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T18:31:29,842 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:29,844 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:29,844 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C44965%2C1731522630074:(num 1731522684830) roll requested 2024-11-13T18:31:29,844 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:29,844 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:29,844 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:29,845 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:29,845 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:29,845 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522674013 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522684830 2024-11-13T18:31:29,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741841_1017 (size=4753) 2024-11-13T18:31:29,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741841_1017 (size=4753) 2024-11-13T18:31:29,854 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41903:41903),(127.0.0.1/127.0.0.1:45685:45685)] 2024-11-13T18:31:29,854 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522674013 is not closed yet, will try archiving it next time 2024-11-13T18:31:29,854 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.1731522689854 2024-11-13T18:31:34,858 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK], DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK]] 2024-11-13T18:31:34,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44965 {}] regionserver.HRegion(8855): Flush requested on 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:31:34,858 WARN [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK], DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK]] 2024-11-13T18:31:34,859 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1ccbdc51916553952674527060ff7b7a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:31:34,871 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5014 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK], DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK]] 2024-11-13T18:31:34,871 WARN [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5014 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK], DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK]] 2024-11-13T18:31:36,861 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T18:31:39,862 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK], DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK]] 2024-11-13T18:31:39,862 WARN [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK], DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK]] 2024-11-13T18:31:39,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:39,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:39,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:39,863 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:39,864 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:39,865 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522684830 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522689854 2024-11-13T18:31:39,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741842_1018 (size=1569) 2024-11-13T18:31:39,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741842_1018 (size=1569) 2024-11-13T18:31:39,870 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:31:39,870 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522684830 is not closed yet, will try archiving it next time 2024-11-13T18:31:39,870 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C44965%2C1731522630074:(num 1731522689854) roll requested 2024-11-13T18:31:39,871 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.1731522699870 2024-11-13T18:31:39,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/5501f6e7b90345faa80f3ff6b9c0adea is 1080, key is row0015/info:/1731522671591/Put/seqid=0 2024-11-13T18:31:39,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741844_1020 (size=12509) 2024-11-13T18:31:39,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741844_1020 (size=12509) 2024-11-13T18:31:39,888 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/5501f6e7b90345faa80f3ff6b9c0adea 2024-11-13T18:31:39,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/5501f6e7b90345faa80f3ff6b9c0adea as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/5501f6e7b90345faa80f3ff6b9c0adea 2024-11-13T18:31:39,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/5501f6e7b90345faa80f3ff6b9c0adea, entries=7, sequenceid=31, filesize=12.2 K 2024-11-13T18:31:44,878 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:44,878 WARN [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:44,915 INFO [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:44,915 WARN [FSHLog-0-hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5-prefix:39e84130bbc9,44965,1731522630074 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33651,DS-5ad0645e-839a-475e-b682-aec20d73957e,DISK], DatanodeInfoWithStorage[127.0.0.1:34725,DS-431c0b3c-a043-4dd7-b55e-b062dd4a49a3,DISK]] 2024-11-13T18:31:44,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1ccbdc51916553952674527060ff7b7a in 10057ms, sequenceid=31, compaction requested=true 2024-11-13T18:31:44,915 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1ccbdc51916553952674527060ff7b7a: 2024-11-13T18:31:44,915 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,916 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,916 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-13T18:31:44,916 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,916 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:31:44,916 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,916 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c5267229e16745ea94969314043b5a19 because midkey is the same as first or last row 2024-11-13T18:31:44,916 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522689854 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522699870 2024-11-13T18:31:44,917 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:31:44,917 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522689854 is not closed yet, will try archiving it next time 2024-11-13T18:31:44,918 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522662536 to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs/39e84130bbc9%2C44965%2C1731522630074.1731522662536 2024-11-13T18:31:44,918 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C44965%2C1731522630074:(num 1731522704918) roll requested 2024-11-13T18:31:44,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1ccbdc51916553952674527060ff7b7a:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:31:44,918 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.1731522704918 2024-11-13T18:31:44,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741843_1019 (size=438) 2024-11-13T18:31:44,920 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522674013 to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs/39e84130bbc9%2C44965%2C1731522630074.1731522674013 2024-11-13T18:31:44,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:31:44,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741843_1019 (size=438) 2024-11-13T18:31:44,922 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522684830 to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs/39e84130bbc9%2C44965%2C1731522630074.1731522684830 2024-11-13T18:31:44,924 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:31:44,924 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522689854 to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs/39e84130bbc9%2C44965%2C1731522630074.1731522689854 2024-11-13T18:31:44,927 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,927 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:31:44,927 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,927 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,927 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,927 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,927 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522699870 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522704918 2024-11-13T18:31:44,928 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.HStore(1541): 1ccbdc51916553952674527060ff7b7a/info is initiating minor compaction (all files) 2024-11-13T18:31:44,929 INFO [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1ccbdc51916553952674527060ff7b7a/info in TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:31:44,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741845_1021 (size=93) 2024-11-13T18:31:44,930 INFO [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c5267229e16745ea94969314043b5a19, hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c6bf19aaca3042adb60380ae3877ac07, hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/5501f6e7b90345faa80f3ff6b9c0adea] into tmpdir=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp, totalSize=36.6 K 2024-11-13T18:31:44,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741845_1021 (size=93) 2024-11-13T18:31:44,931 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:31:44,931 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522699870 is not closed yet, will try archiving it next time 2024-11-13T18:31:44,931 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522699870 to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs/39e84130bbc9%2C44965%2C1731522630074.1731522699870 2024-11-13T18:31:44,931 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] 
compactions.Compactor(225): Compacting c5267229e16745ea94969314043b5a19, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731522642464 2024-11-13T18:31:44,932 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C44965%2C1731522630074.1731522704932 2024-11-13T18:31:44,934 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6bf19aaca3042adb60380ae3877ac07, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731522656499 2024-11-13T18:31:44,935 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5501f6e7b90345faa80f3ff6b9c0adea, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731522671591 2024-11-13T18:31:44,942 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,942 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,942 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,942 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,942 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:31:44,942 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522704918 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522704932 2024-11-13T18:31:44,943 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45685:45685),(127.0.0.1/127.0.0.1:41903:41903)] 2024-11-13T18:31:44,943 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/WALs/39e84130bbc9,44965,1731522630074/39e84130bbc9%2C44965%2C1731522630074.1731522704918 is not closed yet, will try archiving it next time 2024-11-13T18:31:44,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741846_1022 (size=1258) 2024-11-13T18:31:44,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741846_1022 (size=1258) 2024-11-13T18:31:44,966 INFO [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1ccbdc51916553952674527060ff7b7a#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:31:44,967 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/14a0e4084c3f4bd8a164dfb332741b68 is 1080, key is row0001/info:/1731522642464/Put/seqid=0 2024-11-13T18:31:44,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741848_1024 (size=27710) 2024-11-13T18:31:44,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741848_1024 (size=27710) 2024-11-13T18:31:44,985 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/14a0e4084c3f4bd8a164dfb332741b68 as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/14a0e4084c3f4bd8a164dfb332741b68 2024-11-13T18:31:45,004 INFO [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1ccbdc51916553952674527060ff7b7a/info of 1ccbdc51916553952674527060ff7b7a into 14a0e4084c3f4bd8a164dfb332741b68(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T18:31:45,005 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1ccbdc51916553952674527060ff7b7a: 2024-11-13T18:31:45,008 INFO [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a., storeName=1ccbdc51916553952674527060ff7b7a/info, priority=13, startTime=1731522704917; duration=0sec 2024-11-13T18:31:45,008 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T18:31:45,008 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:31:45,009 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/14a0e4084c3f4bd8a164dfb332741b68 because midkey is the same as first or last row 2024-11-13T18:31:45,009 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T18:31:45,009 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:31:45,009 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/14a0e4084c3f4bd8a164dfb332741b68 because midkey is the same as first or last row 2024-11-13T18:31:45,010 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T18:31:45,010 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:31:45,010 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/14a0e4084c3f4bd8a164dfb332741b68 because midkey is the same as first or last row 2024-11-13T18:31:45,010 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:31:45,010 DEBUG [RS:0;39e84130bbc9:44965-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1ccbdc51916553952674527060ff7b7a:info 2024-11-13T18:31:56,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44965 {}] regionserver.HRegion(8855): Flush requested on 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:31:56,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1ccbdc51916553952674527060ff7b7a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:31:56,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/17f5acf1f12f4c988e2a924e2a2cfefd is 1080, key is row0022/info:/1731522704932/Put/seqid=0 2024-11-13T18:31:56,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741849_1025 (size=12509) 2024-11-13T18:31:56,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741849_1025 (size=12509) 2024-11-13T18:31:56,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/17f5acf1f12f4c988e2a924e2a2cfefd 2024-11-13T18:31:57,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/17f5acf1f12f4c988e2a924e2a2cfefd as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/17f5acf1f12f4c988e2a924e2a2cfefd 2024-11-13T18:31:57,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/17f5acf1f12f4c988e2a924e2a2cfefd, entries=7, sequenceid=42, filesize=12.2 K 2024-11-13T18:31:57,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1ccbdc51916553952674527060ff7b7a in 53ms, sequenceid=42, compaction requested=false 2024-11-13T18:31:57,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1ccbdc51916553952674527060ff7b7a: 2024-11-13T18:31:57,021 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-13T18:31:57,021 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:31:57,021 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/14a0e4084c3f4bd8a164dfb332741b68 because midkey is the same as first or last row 2024-11-13T18:31:58,382 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T18:32:02,764 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1ccbdc51916553952674527060ff7b7a, had cached 0 bytes from a total of 40219 2024-11-13T18:32:04,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T18:32:04,983 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
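The split-policy lines above boil down to two checks: the sum of the store's file sizes is compared against a size threshold ("sumSize=39.3 K, sizeToCheck=16.0 K"), and even when that passes, the region is not split if the candidate split point (the largest file's midkey) equals the first or last row ("cannot split ... because midkey is the same as first or last row"). A simplified illustration of that decision is sketched below; it is not the actual ConstantSizeRegionSplitPolicy or StoreUtils code.

import java.util.Arrays;
import java.util.Optional;

// Illustrative sketch of the split decision traced in the log above.
final class SplitDecision {
  // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
  static boolean sizeBigEnough(long sumStoreFileSize, long sizeToCheck) {
    return sumStoreFileSize > sizeToCheck;
  }

  // "cannot split ... because midkey is the same as first or last row"
  static Optional<byte[]> splitPoint(byte[] firstRow, byte[] lastRow, byte[] midKey) {
    if (Arrays.equals(midKey, firstRow) || Arrays.equals(midKey, lastRow)) {
      return Optional.empty(); // no usable split point, leave the region unsplit
    }
    return Optional.of(midKey);
  }
}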
2024-11-13T18:32:04,983 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:04,990 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:04,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:04,993 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
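The call stack above is simply the JUnit tearDown path: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the mini-cluster. The sketch below shows the assumed shape of that lifecycle; only the class and method names visible in the stack trace are taken from the log, everything else (including startMiniCluster in setUp) is an assumption.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Assumed test lifecycle behind the stack trace above
// (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster).
public abstract class MiniClusterLifecycle {
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster();      // assumed: brings up master, region server, ZK and DFS
  }

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();   // closes connections, then shuts the cluster down, as logged above
  }
}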
2024-11-13T18:32:04,993 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T18:32:04,993 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=488572464, stopped=false 2024-11-13T18:32:04,994 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39e84130bbc9,40497,1731522629354 2024-11-13T18:32:04,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:04,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:04,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:04,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:04,998 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:32:04,998 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T18:32:04,998 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:04,998 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:04,998 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:04,999 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:04,999 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39e84130bbc9,44965,1731522630074' ***** 2024-11-13T18:32:04,999 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T18:32:04,999 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T18:32:05,000 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T18:32:05,000 INFO [RS:0;39e84130bbc9:44965 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T18:32:05,000 INFO [RS:0;39e84130bbc9:44965 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T18:32:05,000 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(3091): Received CLOSE for 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:32:05,001 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(959): stopping server 39e84130bbc9,44965,1731522630074 2024-11-13T18:32:05,001 INFO [RS:0;39e84130bbc9:44965 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:32:05,001 INFO [RS:0;39e84130bbc9:44965 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39e84130bbc9:44965. 
2024-11-13T18:32:05,001 DEBUG [RS:0;39e84130bbc9:44965 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:05,001 DEBUG [RS:0;39e84130bbc9:44965 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:05,001 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T18:32:05,001 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T18:32:05,002 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T18:32:05,002 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T18:32:05,002 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1ccbdc51916553952674527060ff7b7a, disabling compactions & flushes 2024-11-13T18:32:05,002 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:32:05,002 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:32:05,002 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. after waiting 0 ms 2024-11-13T18:32:05,002 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 
2024-11-13T18:32:05,003 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1ccbdc51916553952674527060ff7b7a 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-13T18:32:05,005 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T18:32:05,005 DEBUG [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1325): Online Regions={1ccbdc51916553952674527060ff7b7a=TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T18:32:05,005 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:32:05,006 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:32:05,006 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:32:05,006 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:32:05,006 DEBUG [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1ccbdc51916553952674527060ff7b7a 2024-11-13T18:32:05,006 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:32:05,006 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-13T18:32:05,010 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/86f03ffe7def4e1f98607893d949aa2a is 1080, key is row0029/info:/1731522718970/Put/seqid=0 2024-11-13T18:32:05,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741850_1026 (size=8193) 2024-11-13T18:32:05,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741850_1026 (size=8193) 2024-11-13T18:32:05,021 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/86f03ffe7def4e1f98607893d949aa2a 2024-11-13T18:32:05,032 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/.tmp/info/86f03ffe7def4e1f98607893d949aa2a as 
hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/86f03ffe7def4e1f98607893d949aa2a 2024-11-13T18:32:05,037 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/.tmp/info/123be8e68bb249e6bc26d32c82c43321 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a./info:regioninfo/1731522632789/Put/seqid=0 2024-11-13T18:32:05,044 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/86f03ffe7def4e1f98607893d949aa2a, entries=3, sequenceid=48, filesize=8.0 K 2024-11-13T18:32:05,047 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1ccbdc51916553952674527060ff7b7a in 45ms, sequenceid=48, compaction requested=true 2024-11-13T18:32:05,053 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c5267229e16745ea94969314043b5a19, hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c6bf19aaca3042adb60380ae3877ac07, hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/5501f6e7b90345faa80f3ff6b9c0adea] to archive 2024-11-13T18:32:05,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741851_1027 (size=7016) 2024-11-13T18:32:05,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741851_1027 (size=7016) 2024-11-13T18:32:05,056 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/.tmp/info/123be8e68bb249e6bc26d32c82c43321 2024-11-13T18:32:05,058 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-13T18:32:05,062 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c5267229e16745ea94969314043b5a19 to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c5267229e16745ea94969314043b5a19 2024-11-13T18:32:05,065 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c6bf19aaca3042adb60380ae3877ac07 to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/c6bf19aaca3042adb60380ae3877ac07 2024-11-13T18:32:05,068 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/5501f6e7b90345faa80f3ff6b9c0adea to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/info/5501f6e7b90345faa80f3ff6b9c0adea 2024-11-13T18:32:05,094 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/.tmp/ns/1c7a93f9dd3b48478e0b8d79258da673 is 43, key is default/ns:d/1731522632061/Put/seqid=0 2024-11-13T18:32:05,089 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=39e84130bbc9:40497 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T18:32:05,096 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c5267229e16745ea94969314043b5a19=12509, c6bf19aaca3042adb60380ae3877ac07=12509, 5501f6e7b90345faa80f3ff6b9c0adea=12509] 2024-11-13T18:32:05,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741852_1028 (size=5153) 2024-11-13T18:32:05,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741852_1028 (size=5153) 2024-11-13T18:32:05,109 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/.tmp/ns/1c7a93f9dd3b48478e0b8d79258da673 2024-11-13T18:32:05,116 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/default/TestLogRolling-testSlowSyncLogRolling/1ccbdc51916553952674527060ff7b7a/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-13T18:32:05,120 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 2024-11-13T18:32:05,120 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1ccbdc51916553952674527060ff7b7a: Waiting for close lock at 1731522725001Running coprocessor pre-close hooks at 1731522725002 (+1 ms)Disabling compacts and flushes for region at 1731522725002Disabling writes for close at 1731522725002Obtaining lock to block concurrent updates at 1731522725003 (+1 ms)Preparing flush snapshotting stores in 1ccbdc51916553952674527060ff7b7a at 1731522725003Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731522725003Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. at 1731522725004 (+1 ms)Flushing 1ccbdc51916553952674527060ff7b7a/info: creating writer at 1731522725004Flushing 1ccbdc51916553952674527060ff7b7a/info: appending metadata at 1731522725009 (+5 ms)Flushing 1ccbdc51916553952674527060ff7b7a/info: closing flushed file at 1731522725009Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c4363d5: reopening flushed file at 1731522725031 (+22 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1ccbdc51916553952674527060ff7b7a in 45ms, sequenceid=48, compaction requested=true at 1731522725047 (+16 ms)Writing region close event to WAL at 1731522725098 (+51 ms)Running coprocessor post-close hooks at 1731522725118 (+20 ms)Closed at 1731522725120 (+2 ms) 2024-11-13T18:32:05,121 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731522632345.1ccbdc51916553952674527060ff7b7a. 
2024-11-13T18:32:05,145 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/.tmp/table/2801192248d144ab9b422dca0e81f8a2 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731522632805/Put/seqid=0 2024-11-13T18:32:05,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741853_1029 (size=5396) 2024-11-13T18:32:05,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741853_1029 (size=5396) 2024-11-13T18:32:05,156 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/.tmp/table/2801192248d144ab9b422dca0e81f8a2 2024-11-13T18:32:05,168 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/.tmp/info/123be8e68bb249e6bc26d32c82c43321 as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/info/123be8e68bb249e6bc26d32c82c43321 2024-11-13T18:32:05,183 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/info/123be8e68bb249e6bc26d32c82c43321, entries=10, sequenceid=11, filesize=6.9 K 2024-11-13T18:32:05,185 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/.tmp/ns/1c7a93f9dd3b48478e0b8d79258da673 as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/ns/1c7a93f9dd3b48478e0b8d79258da673 2024-11-13T18:32:05,207 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/ns/1c7a93f9dd3b48478e0b8d79258da673, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T18:32:05,208 DEBUG [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T18:32:05,210 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/.tmp/table/2801192248d144ab9b422dca0e81f8a2 as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/table/2801192248d144ab9b422dca0e81f8a2 2024-11-13T18:32:05,231 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/table/2801192248d144ab9b422dca0e81f8a2, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T18:32:05,232 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 226ms, sequenceid=11, compaction requested=false 2024-11-13T18:32:05,250 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T18:32:05,251 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:32:05,251 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:32:05,251 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522725005Running coprocessor pre-close hooks at 1731522725005Disabling compacts and flushes for region at 1731522725005Disabling writes for close at 1731522725006 (+1 ms)Obtaining lock to block concurrent updates at 1731522725006Preparing flush snapshotting stores in 1588230740 at 1731522725006Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731522725007 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731522725008 (+1 ms)Flushing 1588230740/info: creating writer at 1731522725008Flushing 1588230740/info: appending metadata at 1731522725036 (+28 ms)Flushing 1588230740/info: closing flushed file at 1731522725036Flushing 1588230740/ns: creating writer at 1731522725068 (+32 ms)Flushing 1588230740/ns: appending metadata at 1731522725093 (+25 ms)Flushing 1588230740/ns: closing flushed file at 1731522725093Flushing 1588230740/table: creating writer at 1731522725124 (+31 ms)Flushing 1588230740/table: appending metadata at 1731522725144 (+20 ms)Flushing 1588230740/table: closing flushed file at 1731522725145 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7498736b: reopening flushed file at 1731522725167 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bc4884f: reopening flushed file at 1731522725183 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b52c549: reopening flushed file at 1731522725207 (+24 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 226ms, sequenceid=11, compaction requested=false at 1731522725233 (+26 ms)Writing region close event to WAL at 1731522725236 (+3 ms)Running coprocessor post-close hooks at 1731522725251 (+15 ms)Closed at 1731522725251 2024-11-13T18:32:05,252 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T18:32:05,351 INFO [regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T18:32:05,351 INFO 
[regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T18:32:05,356 INFO [regionserver/39e84130bbc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:32:05,409 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(976): stopping server 39e84130bbc9,44965,1731522630074; all regions closed. 2024-11-13T18:32:05,411 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,411 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,411 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,412 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,412 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741834_1010 (size=3066) 2024-11-13T18:32:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741834_1010 (size=3066) 2024-11-13T18:32:05,429 DEBUG [RS:0;39e84130bbc9:44965 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs 2024-11-13T18:32:05,429 INFO [RS:0;39e84130bbc9:44965 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C44965%2C1731522630074.meta:.meta(num 1731522631900) 2024-11-13T18:32:05,432 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,432 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,432 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,433 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,433 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741847_1023 (size=12695) 2024-11-13T18:32:05,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741847_1023 (size=12695) 2024-11-13T18:32:05,441 DEBUG [RS:0;39e84130bbc9:44965 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/oldWALs 2024-11-13T18:32:05,441 INFO [RS:0;39e84130bbc9:44965 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C44965%2C1731522630074:(num 1731522704932) 2024-11-13T18:32:05,441 DEBUG [RS:0;39e84130bbc9:44965 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:05,441 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:32:05,442 INFO [RS:0;39e84130bbc9:44965 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:32:05,442 INFO [RS:0;39e84130bbc9:44965 {}] hbase.ChoreService(370): Chore service for: regionserver/39e84130bbc9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T18:32:05,442 INFO [RS:0;39e84130bbc9:44965 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:32:05,442 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T18:32:05,443 INFO [RS:0;39e84130bbc9:44965 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44965 2024-11-13T18:32:05,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39e84130bbc9,44965,1731522630074 2024-11-13T18:32:05,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:32:05,447 INFO [RS:0;39e84130bbc9:44965 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:32:05,448 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39e84130bbc9,44965,1731522630074] 2024-11-13T18:32:05,450 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39e84130bbc9,44965,1731522630074 already deleted, retry=false 2024-11-13T18:32:05,450 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39e84130bbc9,44965,1731522630074 expired; onlineServers=0 2024-11-13T18:32:05,451 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39e84130bbc9,40497,1731522629354' ***** 2024-11-13T18:32:05,451 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T18:32:05,451 INFO [M:0;39e84130bbc9:40497 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:32:05,451 INFO [M:0;39e84130bbc9:40497 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:32:05,451 DEBUG [M:0;39e84130bbc9:40497 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T18:32:05,451 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-13T18:32:05,451 DEBUG [M:0;39e84130bbc9:40497 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T18:32:05,451 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522631172 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522631172,5,FailOnTimeoutGroup] 2024-11-13T18:32:05,451 INFO [M:0;39e84130bbc9:40497 {}] hbase.ChoreService(370): Chore service for: master/39e84130bbc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T18:32:05,452 INFO [M:0;39e84130bbc9:40497 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:32:05,452 DEBUG [M:0;39e84130bbc9:40497 {}] master.HMaster(1795): Stopping service threads 2024-11-13T18:32:05,452 INFO [M:0;39e84130bbc9:40497 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T18:32:05,452 INFO [M:0;39e84130bbc9:40497 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:32:05,452 INFO [M:0;39e84130bbc9:40497 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T18:32:05,453 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-13T18:32:05,453 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522631171 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522631171,5,FailOnTimeoutGroup] 2024-11-13T18:32:05,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T18:32:05,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:05,454 DEBUG [M:0;39e84130bbc9:40497 {}] zookeeper.ZKUtil(347): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T18:32:05,454 WARN [M:0;39e84130bbc9:40497 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T18:32:05,455 INFO [M:0;39e84130bbc9:40497 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/.lastflushedseqids 2024-11-13T18:32:05,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741854_1030 (size=130) 2024-11-13T18:32:05,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741854_1030 (size=130) 2024-11-13T18:32:05,470 INFO [M:0;39e84130bbc9:40497 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T18:32:05,470 INFO [M:0;39e84130bbc9:40497 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T18:32:05,471 DEBUG [M:0;39e84130bbc9:40497 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:32:05,471 INFO [M:0;39e84130bbc9:40497 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:05,471 DEBUG [M:0;39e84130bbc9:40497 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:05,471 DEBUG [M:0;39e84130bbc9:40497 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:32:05,471 DEBUG [M:0;39e84130bbc9:40497 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:32:05,471 INFO [M:0;39e84130bbc9:40497 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-13T18:32:05,498 DEBUG [M:0;39e84130bbc9:40497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/437f07367c7d45e89a3dd560d3762b4a is 82, key is hbase:meta,,1/info:regioninfo/1731522631977/Put/seqid=0 2024-11-13T18:32:05,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741855_1031 (size=5672) 2024-11-13T18:32:05,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741855_1031 (size=5672) 2024-11-13T18:32:05,507 INFO [M:0;39e84130bbc9:40497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/437f07367c7d45e89a3dd560d3762b4a 2024-11-13T18:32:05,534 DEBUG [M:0;39e84130bbc9:40497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/90c2da839fcf4967b7526b7e3002cd85 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731522632812/Put/seqid=0 2024-11-13T18:32:05,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741856_1032 (size=6247) 2024-11-13T18:32:05,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741856_1032 (size=6247) 2024-11-13T18:32:05,547 INFO [M:0;39e84130bbc9:40497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/90c2da839fcf4967b7526b7e3002cd85 2024-11-13T18:32:05,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:05,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44965-0x100ed5d80070001, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:05,551 INFO [RS:0;39e84130bbc9:44965 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:32:05,552 INFO [RS:0;39e84130bbc9:44965 {}] regionserver.HRegionServer(1031): Exiting; stopping=39e84130bbc9,44965,1731522630074; zookeeper connection closed. 
2024-11-13T18:32:05,552 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52e043d8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52e043d8 2024-11-13T18:32:05,553 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T18:32:05,556 INFO [M:0;39e84130bbc9:40497 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 90c2da839fcf4967b7526b7e3002cd85 2024-11-13T18:32:05,577 DEBUG [M:0;39e84130bbc9:40497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/31bbf74fbefe46b5a55e24ccd1dc1f8f is 69, key is 39e84130bbc9,44965,1731522630074/rs:state/1731522631250/Put/seqid=0 2024-11-13T18:32:05,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741857_1033 (size=5156) 2024-11-13T18:32:05,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741857_1033 (size=5156) 2024-11-13T18:32:05,585 INFO [M:0;39e84130bbc9:40497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/31bbf74fbefe46b5a55e24ccd1dc1f8f 2024-11-13T18:32:05,611 DEBUG [M:0;39e84130bbc9:40497 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c001effdf6e14d5980462c676c5708a1 is 52, key is load_balancer_on/state:d/1731522632317/Put/seqid=0 2024-11-13T18:32:05,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741858_1034 (size=5056) 2024-11-13T18:32:05,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741858_1034 (size=5056) 2024-11-13T18:32:05,624 INFO [M:0;39e84130bbc9:40497 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c001effdf6e14d5980462c676c5708a1 2024-11-13T18:32:05,634 DEBUG [M:0;39e84130bbc9:40497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/437f07367c7d45e89a3dd560d3762b4a as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/437f07367c7d45e89a3dd560d3762b4a 2024-11-13T18:32:05,644 INFO [M:0;39e84130bbc9:40497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/437f07367c7d45e89a3dd560d3762b4a, entries=8, sequenceid=59, filesize=5.5 K 2024-11-13T18:32:05,645 DEBUG 
[M:0;39e84130bbc9:40497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/90c2da839fcf4967b7526b7e3002cd85 as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/90c2da839fcf4967b7526b7e3002cd85 2024-11-13T18:32:05,654 INFO [M:0;39e84130bbc9:40497 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 90c2da839fcf4967b7526b7e3002cd85 2024-11-13T18:32:05,654 INFO [M:0;39e84130bbc9:40497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/90c2da839fcf4967b7526b7e3002cd85, entries=6, sequenceid=59, filesize=6.1 K 2024-11-13T18:32:05,659 DEBUG [M:0;39e84130bbc9:40497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/31bbf74fbefe46b5a55e24ccd1dc1f8f as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/31bbf74fbefe46b5a55e24ccd1dc1f8f 2024-11-13T18:32:05,668 INFO [M:0;39e84130bbc9:40497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/31bbf74fbefe46b5a55e24ccd1dc1f8f, entries=1, sequenceid=59, filesize=5.0 K 2024-11-13T18:32:05,670 DEBUG [M:0;39e84130bbc9:40497 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c001effdf6e14d5980462c676c5708a1 as hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c001effdf6e14d5980462c676c5708a1 2024-11-13T18:32:05,678 INFO [M:0;39e84130bbc9:40497 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c001effdf6e14d5980462c676c5708a1, entries=1, sequenceid=59, filesize=4.9 K 2024-11-13T18:32:05,679 INFO [M:0;39e84130bbc9:40497 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 208ms, sequenceid=59, compaction requested=false 2024-11-13T18:32:05,681 INFO [M:0;39e84130bbc9:40497 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:32:05,682 DEBUG [M:0;39e84130bbc9:40497 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522725471Disabling compacts and flushes for region at 1731522725471Disabling writes for close at 1731522725471Obtaining lock to block concurrent updates at 1731522725471Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731522725471Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731522725472 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731522725473 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731522725473Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731522725497 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731522725497Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731522725514 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731522725533 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731522725533Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731522725556 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731522725577 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731522725577Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731522725591 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731522725610 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731522725610Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@708f579f: reopening flushed file at 1731522725633 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f29f738: reopening flushed file at 1731522725644 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9dacde5: reopening flushed file at 1731522725655 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1546907b: reopening flushed file at 1731522725668 (+13 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 208ms, sequenceid=59, compaction requested=false at 1731522725679 (+11 ms)Writing region close event to WAL at 1731522725681 (+2 ms)Closed at 1731522725681 2024-11-13T18:32:05,683 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,683 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,683 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,683 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,684 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:05,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34725 is added to blk_1073741830_1006 (size=27973) 2024-11-13T18:32:05,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33651 is added to blk_1073741830_1006 (size=27973) 2024-11-13T18:32:05,689 INFO [M:0;39e84130bbc9:40497 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-13T18:32:05,689 INFO [M:0;39e84130bbc9:40497 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40497 2024-11-13T18:32:05,689 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T18:32:05,689 INFO [M:0;39e84130bbc9:40497 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:32:05,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:05,795 INFO [M:0;39e84130bbc9:40497 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:32:05,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40497-0x100ed5d80070000, quorum=127.0.0.1:63520, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:05,826 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:05,829 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:05,830 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:05,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:05,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:05,837 WARN [BP-1387870917-172.17.0.3-1731522626275 heartbeating to localhost/127.0.0.1:39907 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:05,837 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:32:05,837 WARN [BP-1387870917-172.17.0.3-1731522626275 heartbeating to localhost/127.0.0.1:39907 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1387870917-172.17.0.3-1731522626275 (Datanode Uuid 8473ec5d-c1ae-4353-a507-1450353ac727) service to localhost/127.0.0.1:39907 2024-11-13T18:32:05,837 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:05,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/data/data3/current/BP-1387870917-172.17.0.3-1731522626275 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:05,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/data/data4/current/BP-1387870917-172.17.0.3-1731522626275 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:05,840 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:05,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:05,851 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:05,851 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:05,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:05,851 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:05,854 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:32:05,854 WARN [BP-1387870917-172.17.0.3-1731522626275 heartbeating to localhost/127.0.0.1:39907 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:05,854 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:05,854 WARN [BP-1387870917-172.17.0.3-1731522626275 heartbeating to localhost/127.0.0.1:39907 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1387870917-172.17.0.3-1731522626275 (Datanode Uuid c852c6d5-f0df-4483-8b96-bc1ded6e0976) service to localhost/127.0.0.1:39907 2024-11-13T18:32:05,855 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/data/data1/current/BP-1387870917-172.17.0.3-1731522626275 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:05,855 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/cluster_8f6a0e40-1664-fc91-8eb5-7d08b5ab2dea/data/data2/current/BP-1387870917-172.17.0.3-1731522626275 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:05,855 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:05,872 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:32:05,873 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:05,873 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:05,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:05,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:05,888 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T18:32:05,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T18:32:05,939 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=80 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39907 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39907 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/39e84130bbc9:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39907 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3aaca39e java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39907 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39907 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39907 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/39e84130bbc9:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39907 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39907 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/39e84130bbc9:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=376 (was 266) - SystemLoadAverage LEAK? -, ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3198 (was 3086) - AvailableMemoryMB LEAK? 
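The hbase.ResourceChecker report above ends the testSlowSyncLogRolling run and lists every "Potentially hanging thread" left behind, plus the open-file-descriptor and load deltas. A minimal sketch of how such a report could be summarized offline follows; this is hypothetical tooling, not part of this log or of HBase itself — the script name, the marker string, and the first-token grouping are assumptions based only on the log format visible above.

    #!/usr/bin/env python3
    # hanging_threads.py (hypothetical helper, not part of the test output above):
    # summarizes the "Potentially hanging thread:" entries that hbase.ResourceChecker
    # prints after a test, so repeated leak suspects are easier to spot across runs.
    import re
    import sys
    from collections import Counter

    MARKER = "Potentially hanging thread: "

    def hanging_threads(text: str) -> Counter:
        # Keep only the first whitespace-delimited token after each marker
        # (e.g. "RPCClient-NioEventLoopGroup-4-3", "sync.2"); that is enough
        # to group most suspects even though some names contain spaces.
        counts = Counter()
        for match in re.finditer(re.escape(MARKER) + r"(\S+)", text):
            counts[match.group(1)] += 1
        return counts

    if __name__ == "__main__":
        # Assumed usage: python hanging_threads.py test-output.log
        with open(sys.argv[1], encoding="utf-8", errors="replace") as fh:
            for name, n in hanging_threads(fh.read()).most_common():
                print(f"{n:4d}  {name}")

Run against a capture like this one, the counts make it obvious which pools (for example the FSHLog sync.N runners and the netty event loops) dominate the leak report before the next test starts below.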
- 2024-11-13T18:32:05,949 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=81, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=376, ProcessCount=11, AvailableMemoryMB=3197 2024-11-13T18:32:05,950 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T18:32:05,950 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.log.dir so I do NOT create it in target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e 2024-11-13T18:32:05,950 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7be242d-bf27-45ba-bbf7-7e82adbf6900/hadoop.tmp.dir so I do NOT create it in target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e 2024-11-13T18:32:05,951 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1, deleteOnExit=true 2024-11-13T18:32:05,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T18:32:05,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/test.cache.data in system properties and HBase conf 2024-11-13T18:32:05,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T18:32:05,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.log.dir in system properties and HBase conf 2024-11-13T18:32:05,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T18:32:05,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T18:32:05,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T18:32:05,951 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T18:32:05,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:32:05,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:32:05,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T18:32:05,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:32:05,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T18:32:05,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T18:32:05,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:32:05,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:32:05,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T18:32:05,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/nfs.dump.dir in system properties and HBase conf 2024-11-13T18:32:05,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/java.io.tmpdir in system properties and HBase conf 2024-11-13T18:32:05,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:32:05,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T18:32:05,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T18:32:05,971 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:32:06,062 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:06,070 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:06,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:06,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:06,072 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:32:06,073 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:06,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3c3ceb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:06,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a69944b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:06,232 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3235d5ba{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/java.io.tmpdir/jetty-localhost-46737-hadoop-hdfs-3_4_1-tests_jar-_-any-13755655814356363266/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:32:06,237 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@347af0d{HTTP/1.1, (http/1.1)}{localhost:46737} 2024-11-13T18:32:06,237 INFO [Time-limited test {}] server.Server(415): Started @102288ms 2024-11-13T18:32:06,256 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:32:06,363 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:06,368 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:06,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:06,369 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:06,369 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:32:06,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@607b9bc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:06,371 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@670e4080{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:06,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61e52b83{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/java.io.tmpdir/jetty-localhost-33767-hadoop-hdfs-3_4_1-tests_jar-_-any-3471353993916526018/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:06,534 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29a18ee0{HTTP/1.1, (http/1.1)}{localhost:33767} 2024-11-13T18:32:06,534 INFO [Time-limited test {}] server.Server(415): Started @102585ms 2024-11-13T18:32:06,536 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:32:06,596 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:06,602 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:06,604 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:06,604 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:06,604 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:32:06,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d944f53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:06,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18f27499{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:06,669 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/data/data1/current/BP-1847718472-172.17.0.3-1731522725992/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:06,669 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/data/data2/current/BP-1847718472-172.17.0.3-1731522725992/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:06,702 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:32:06,705 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0169a38eb1c55e6 with lease ID 0xe2e3cb78b98b1777: Processing first storage report for DS-fe040dfc-dbfa-4d92-b7b8-0b7056dd8e10 from datanode DatanodeRegistration(127.0.0.1:39855, datanodeUuid=dcb7f8e4-3eb0-48d1-bd51-2252d63c0792, infoPort=44513, infoSecurePort=0, ipcPort=35501, storageInfo=lv=-57;cid=testClusterID;nsid=948129766;c=1731522725992) 2024-11-13T18:32:06,705 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0169a38eb1c55e6 with lease ID 0xe2e3cb78b98b1777: from storage DS-fe040dfc-dbfa-4d92-b7b8-0b7056dd8e10 node DatanodeRegistration(127.0.0.1:39855, datanodeUuid=dcb7f8e4-3eb0-48d1-bd51-2252d63c0792, infoPort=44513, infoSecurePort=0, ipcPort=35501, storageInfo=lv=-57;cid=testClusterID;nsid=948129766;c=1731522725992), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:06,705 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb0169a38eb1c55e6 with lease ID 0xe2e3cb78b98b1777: Processing first storage report for DS-86b31487-1894-475f-846a-ae0f9e44d975 from datanode DatanodeRegistration(127.0.0.1:39855, datanodeUuid=dcb7f8e4-3eb0-48d1-bd51-2252d63c0792, infoPort=44513, infoSecurePort=0, ipcPort=35501, storageInfo=lv=-57;cid=testClusterID;nsid=948129766;c=1731522725992) 2024-11-13T18:32:06,705 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0169a38eb1c55e6 with lease ID 0xe2e3cb78b98b1777: from storage DS-86b31487-1894-475f-846a-ae0f9e44d975 node DatanodeRegistration(127.0.0.1:39855, datanodeUuid=dcb7f8e4-3eb0-48d1-bd51-2252d63c0792, infoPort=44513, infoSecurePort=0, ipcPort=35501, storageInfo=lv=-57;cid=testClusterID;nsid=948129766;c=1731522725992), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:06,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cebd4b6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/java.io.tmpdir/jetty-localhost-38533-hadoop-hdfs-3_4_1-tests_jar-_-any-3840900908414756668/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:06,752 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e6ff2f3{HTTP/1.1, (http/1.1)}{localhost:38533} 2024-11-13T18:32:06,752 INFO [Time-limited test {}] server.Server(415): Started @102803ms 2024-11-13T18:32:06,754 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
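
The HBaseTestingUtil(751) entries at the top of this block copy per-test directory settings (yarn.*, dfs.*, nfs.dump.dir, java.io.tmpdir) into system properties and the HBase Configuration, after which a NameNode and two DataNode Jetty web apps come up. A minimal sketch of driving the same utility from a test follows, assuming the long-standing HBaseTestingUtility-style method names; the two-datanode count mirrors the two DataNode startups logged above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    // Creates the random test-data directory (like the 8282a431-... path above) and
    // redirects the yarn.*/dfs.* directories listed in the log underneath it.
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();

    // Two DataNodes, matching the two datanode web apps started above (assumed count).
    util.startMiniDFSCluster(2);
    System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));

    util.shutdownMiniDFSCluster();
  }
}
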
2024-11-13T18:32:06,859 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/data/data3/current/BP-1847718472-172.17.0.3-1731522725992/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:06,860 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/data/data4/current/BP-1847718472-172.17.0.3-1731522725992/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:06,880 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:32:06,885 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca59048c8abc751d with lease ID 0xe2e3cb78b98b1778: Processing first storage report for DS-690340e9-bedf-4ae2-b9f6-e7c1c5b3442a from datanode DatanodeRegistration(127.0.0.1:44185, datanodeUuid=9f3aa70d-8758-43f6-8b9d-2a7e1a07093b, infoPort=45467, infoSecurePort=0, ipcPort=33671, storageInfo=lv=-57;cid=testClusterID;nsid=948129766;c=1731522725992) 2024-11-13T18:32:06,885 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca59048c8abc751d with lease ID 0xe2e3cb78b98b1778: from storage DS-690340e9-bedf-4ae2-b9f6-e7c1c5b3442a node DatanodeRegistration(127.0.0.1:44185, datanodeUuid=9f3aa70d-8758-43f6-8b9d-2a7e1a07093b, infoPort=45467, infoSecurePort=0, ipcPort=33671, storageInfo=lv=-57;cid=testClusterID;nsid=948129766;c=1731522725992), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:32:06,885 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca59048c8abc751d with lease ID 0xe2e3cb78b98b1778: Processing first storage report for DS-ffb9b253-7581-4ae9-86e8-43db20db1d60 from datanode DatanodeRegistration(127.0.0.1:44185, datanodeUuid=9f3aa70d-8758-43f6-8b9d-2a7e1a07093b, infoPort=45467, infoSecurePort=0, ipcPort=33671, storageInfo=lv=-57;cid=testClusterID;nsid=948129766;c=1731522725992) 2024-11-13T18:32:06,885 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca59048c8abc751d with lease ID 0xe2e3cb78b98b1778: from storage DS-ffb9b253-7581-4ae9-86e8-43db20db1d60 node DatanodeRegistration(127.0.0.1:44185, datanodeUuid=9f3aa70d-8758-43f6-8b9d-2a7e1a07093b, infoPort=45467, infoSecurePort=0, ipcPort=33671, storageInfo=lv=-57;cid=testClusterID;nsid=948129766;c=1731522725992), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:06,988 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e 2024-11-13T18:32:06,992 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/zookeeper_0, clientPort=57898, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T18:32:06,993 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57898 2024-11-13T18:32:06,993 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:06,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:07,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:32:07,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:32:07,017 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53 with version=8 2024-11-13T18:32:07,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/hbase-staging 2024-11-13T18:32:07,020 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:32:07,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:07,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:07,020 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:32:07,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:07,020 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:32:07,020 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T18:32:07,020 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:32:07,021 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40635 2024-11-13T18:32:07,023 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40635 connecting to ZooKeeper ensemble=127.0.0.1:57898 2024-11-13T18:32:07,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:406350x0, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:32:07,030 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40635-0x100ed5f00f90000 connected 2024-11-13T18:32:07,046 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:07,049 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:07,052 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:07,052 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53, hbase.cluster.distributed=false 2024-11-13T18:32:07,056 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:32:07,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40635 2024-11-13T18:32:07,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40635 2024-11-13T18:32:07,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40635 2024-11-13T18:32:07,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40635 2024-11-13T18:32:07,058 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40635 2024-11-13T18:32:07,078 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:32:07,078 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:07,078 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:07,078 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:32:07,078 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:07,078 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:32:07,078 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T18:32:07,078 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:32:07,079 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35969 2024-11-13T18:32:07,081 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35969 connecting to ZooKeeper ensemble=127.0.0.1:57898 2024-11-13T18:32:07,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:07,084 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:07,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359690x0, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:32:07,092 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359690x0, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:07,092 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35969-0x100ed5f00f90001 connected 2024-11-13T18:32:07,092 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T18:32:07,093 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T18:32:07,094 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T18:32:07,095 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:32:07,101 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35969 2024-11-13T18:32:07,102 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35969 2024-11-13T18:32:07,103 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35969 2024-11-13T18:32:07,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35969 2024-11-13T18:32:07,110 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35969 2024-11-13T18:32:07,127 
DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39e84130bbc9:40635 2024-11-13T18:32:07,127 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39e84130bbc9,40635,1731522727019 2024-11-13T18:32:07,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:07,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:07,130 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39e84130bbc9,40635,1731522727019 2024-11-13T18:32:07,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T18:32:07,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,133 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T18:32:07,134 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39e84130bbc9,40635,1731522727019 from backup master directory 2024-11-13T18:32:07,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39e84130bbc9,40635,1731522727019 2024-11-13T18:32:07,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:07,136 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
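
The ipc.RpcExecutor(188)/RpcExecutor(290) entries above show the master (port 40635) and region server (port 35969) each building the default, priority.RWQ, replication and metaPriority call queues with handlerCount=3 and maxQueueLength=30. A rough configuration sketch of the knobs behind those numbers; the key names follow the usual hbase-site.xml vocabulary and should be treated as assumptions if this branch differs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcQueueConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // handlerCount=3 in the log is a test-sized value; the production default is 30.
    conf.setInt("hbase.regionserver.handler.count", 3);

    // priority.RWQ.Fifo above is split into writeQueues=1/readQueues=1, which is
    // governed by the read share of the call queue.
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);

    System.out.println("handlers = " + conf.getInt("hbase.regionserver.handler.count", 30));
  }
}
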
2024-11-13T18:32:07,136 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39e84130bbc9,40635,1731522727019 2024-11-13T18:32:07,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:07,141 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/hbase.id] with ID: ebb7c5e2-ea17-41e2-a230-3c10a0ab2d71 2024-11-13T18:32:07,141 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/.tmp/hbase.id 2024-11-13T18:32:07,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:32:07,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:32:07,154 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/.tmp/hbase.id]:[hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/hbase.id] 2024-11-13T18:32:07,172 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:07,173 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T18:32:07,175 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
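
Both servers above connect to ZooKeeper ensemble=127.0.0.1:57898, the single-node MiniZooKeeperCluster started earlier in this block (zookeeper.MiniZooKeeperCluster(261/286)). A hedged sketch of starting and stopping that embedded quorum with the same test utility, again assuming the HBaseTestingUtility-style method names.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;

public class MiniZkSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Starts one embedded ZooKeeper server under the test-data directory and
    // verifies it with a 'stat' probe, as MiniZooKeeperCluster(286) reports above.
    MiniZooKeeperCluster zk = util.startMiniZKCluster();
    System.out.println("clientPort = " + zk.getClientPort());

    util.shutdownMiniZKCluster();
  }
}
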
2024-11-13T18:32:07,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:32:07,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:32:07,186 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:32:07,187 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T18:32:07,189 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:07,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:32:07,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:32:07,205 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store 2024-11-13T18:32:07,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:32:07,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:32:07,218 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:07,218 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:32:07,218 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:07,218 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:07,219 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:32:07,219 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:07,219 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
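
region.MasterRegion(370) and regionserver.HRegion(7590) above create the local 'master:store' region from a descriptor with info, proc, rs and state families. For comparison, a similar descriptor can be assembled with the public client API; the table name below is hypothetical, and only the 'info' family attributes from the log are spelled out.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info'-like family: 3 versions, in-memory, 8 KB blocks, ROW_INDEX_V1
    // encoding and a ROWCOL bloom filter, mirroring the attributes in the log.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_store"))   // hypothetical name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .build())
        .build();
    System.out.println(desc);
  }
}
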
2024-11-13T18:32:07,219 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522727218Disabling compacts and flushes for region at 1731522727218Disabling writes for close at 1731522727219 (+1 ms)Writing region close event to WAL at 1731522727219Closed at 1731522727219 2024-11-13T18:32:07,221 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/.initializing 2024-11-13T18:32:07,221 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/WALs/39e84130bbc9,40635,1731522727019 2024-11-13T18:32:07,225 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C40635%2C1731522727019, suffix=, logDir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/WALs/39e84130bbc9,40635,1731522727019, archiveDir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/oldWALs, maxLogs=10 2024-11-13T18:32:07,226 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40635%2C1731522727019.1731522727225 2024-11-13T18:32:07,235 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/WALs/39e84130bbc9,40635,1731522727019/39e84130bbc9%2C40635%2C1731522727019.1731522727225 2024-11-13T18:32:07,244 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44513:44513),(127.0.0.1/127.0.0.1:45467:45467)] 2024-11-13T18:32:07,250 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:32:07,251 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:07,251 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,251 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,253 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,255 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T18:32:07,255 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:07,256 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:07,257 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,263 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T18:32:07,263 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:07,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:07,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T18:32:07,268 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:07,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:07,269 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,271 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T18:32:07,271 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:07,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:07,272 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,273 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,274 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,277 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,277 DEBUG [master/39e84130bbc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,278 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T18:32:07,280 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:07,286 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:32:07,287 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860447, jitterRate=0.0941154956817627}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T18:32:07,288 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731522727251Initializing all the Stores at 1731522727253 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522727253Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522727253Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522727253Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522727253Cleaning up temporary data from old regions at 1731522727277 (+24 ms)Region opened successfully at 1731522727288 (+11 ms) 2024-11-13T18:32:07,293 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T18:32:07,299 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22fb147a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:32:07,300 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T18:32:07,301 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T18:32:07,301 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T18:32:07,301 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T18:32:07,302 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T18:32:07,302 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T18:32:07,302 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T18:32:07,305 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T18:32:07,306 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T18:32:07,308 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T18:32:07,309 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T18:32:07,310 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T18:32:07,311 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T18:32:07,312 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T18:32:07,313 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T18:32:07,315 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T18:32:07,316 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T18:32:07,317 DEBUG 
[master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T18:32:07,320 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T18:32:07,322 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T18:32:07,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:07,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:07,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,324 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39e84130bbc9,40635,1731522727019, sessionid=0x100ed5f00f90000, setting cluster-up flag (Was=false) 2024-11-13T18:32:07,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,334 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T18:32:07,335 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,40635,1731522727019 2024-11-13T18:32:07,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,344 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T18:32:07,346 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,40635,1731522727019 2024-11-13T18:32:07,347 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T18:32:07,349 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:07,350 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T18:32:07,350 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T18:32:07,350 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39e84130bbc9,40635,1731522727019 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T18:32:07,352 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:07,352 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:07,352 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:07,352 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:07,352 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39e84130bbc9:0, corePoolSize=10, maxPoolSize=10 2024-11-13T18:32:07,352 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,352 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:32:07,353 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T18:32:07,354 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731522757354 2024-11-13T18:32:07,354 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T18:32:07,354 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T18:32:07,354 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T18:32:07,354 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T18:32:07,354 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T18:32:07,355 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T18:32:07,355 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:07,355 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T18:32:07,355 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:07,356 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T18:32:07,356 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T18:32:07,356 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T18:32:07,356 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T18:32:07,356 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T18:32:07,356 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:07,357 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T18:32:07,357 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522727356,5,FailOnTimeoutGroup] 2024-11-13T18:32:07,360 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522727357,5,FailOnTimeoutGroup] 2024-11-13T18:32:07,361 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,361 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T18:32:07,361 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,361 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:32:07,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:32:07,369 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T18:32:07,369 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53 2024-11-13T18:32:07,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:32:07,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:32:07,379 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:07,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:32:07,382 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:32:07,383 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:07,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:07,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:32:07,385 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:32:07,385 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:07,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:07,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:32:07,388 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:32:07,388 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:07,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:07,389 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:32:07,391 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:32:07,391 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:07,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:07,392 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:32:07,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740 2024-11-13T18:32:07,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740 2024-11-13T18:32:07,395 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:32:07,395 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:32:07,396 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:32:07,401 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:32:07,406 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:32:07,406 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727773, jitterRate=-0.07458893954753876}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:32:07,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731522727379Initializing all the Stores at 1731522727380 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522727380Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522727380Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522727380Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522727380Cleaning up temporary data from old regions at 1731522727395 (+15 ms)Region opened successfully at 1731522727408 (+13 ms) 2024-11-13T18:32:07,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:32:07,410 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:32:07,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:32:07,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:32:07,410 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:32:07,414 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:32:07,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522727410Disabling compacts and flushes for region at 
1731522727410Disabling writes for close at 1731522727410Writing region close event to WAL at 1731522727414 (+4 ms)Closed at 1731522727414 2024-11-13T18:32:07,415 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(746): ClusterId : ebb7c5e2-ea17-41e2-a230-3c10a0ab2d71 2024-11-13T18:32:07,415 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T18:32:07,420 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:07,420 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T18:32:07,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T18:32:07,423 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:32:07,425 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T18:32:07,430 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T18:32:07,431 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T18:32:07,450 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T18:32:07,451 DEBUG [RS:0;39e84130bbc9:35969 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c12a66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:32:07,470 DEBUG [RS:0;39e84130bbc9:35969 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39e84130bbc9:35969 2024-11-13T18:32:07,470 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T18:32:07,470 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T18:32:07,471 DEBUG [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T18:32:07,473 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(2659): reportForDuty to master=39e84130bbc9,40635,1731522727019 with port=35969, startcode=1731522727077 2024-11-13T18:32:07,473 DEBUG [RS:0;39e84130bbc9:35969 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T18:32:07,479 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46581, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T18:32:07,479 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40635 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39e84130bbc9,35969,1731522727077 2024-11-13T18:32:07,480 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40635 {}] master.ServerManager(517): Registering regionserver=39e84130bbc9,35969,1731522727077 2024-11-13T18:32:07,483 DEBUG [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53 2024-11-13T18:32:07,483 DEBUG [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:32945 2024-11-13T18:32:07,483 DEBUG [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T18:32:07,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:32:07,487 DEBUG [RS:0;39e84130bbc9:35969 {}] zookeeper.ZKUtil(111): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39e84130bbc9,35969,1731522727077 2024-11-13T18:32:07,487 WARN [RS:0;39e84130bbc9:35969 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:32:07,487 INFO [RS:0;39e84130bbc9:35969 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:07,487 DEBUG [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/WALs/39e84130bbc9,35969,1731522727077 2024-11-13T18:32:07,501 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39e84130bbc9,35969,1731522727077] 2024-11-13T18:32:07,508 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T18:32:07,512 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T18:32:07,512 INFO [RS:0;39e84130bbc9:35969 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T18:32:07,513 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:07,513 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T18:32:07,515 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T18:32:07,515 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,515 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,515 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,515 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,515 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:32:07,516 DEBUG [RS:0;39e84130bbc9:35969 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:32:07,522 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:07,522 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,522 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,522 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,522 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,522 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,35969,1731522727077-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:32:07,540 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T18:32:07,541 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,35969,1731522727077-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,541 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,541 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.Replication(171): 39e84130bbc9,35969,1731522727077 started 2024-11-13T18:32:07,557 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:07,558 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(1482): Serving as 39e84130bbc9,35969,1731522727077, RpcServer on 39e84130bbc9/172.17.0.3:35969, sessionid=0x100ed5f00f90001 2024-11-13T18:32:07,558 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T18:32:07,558 DEBUG [RS:0;39e84130bbc9:35969 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39e84130bbc9,35969,1731522727077 2024-11-13T18:32:07,558 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,35969,1731522727077' 2024-11-13T18:32:07,558 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T18:32:07,559 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T18:32:07,560 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T18:32:07,560 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T18:32:07,560 DEBUG [RS:0;39e84130bbc9:35969 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39e84130bbc9,35969,1731522727077 2024-11-13T18:32:07,560 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,35969,1731522727077' 2024-11-13T18:32:07,560 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T18:32:07,560 DEBUG 
[RS:0;39e84130bbc9:35969 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T18:32:07,561 DEBUG [RS:0;39e84130bbc9:35969 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T18:32:07,561 INFO [RS:0;39e84130bbc9:35969 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T18:32:07,561 INFO [RS:0;39e84130bbc9:35969 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T18:32:07,575 WARN [39e84130bbc9:40635 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T18:32:07,664 INFO [RS:0;39e84130bbc9:35969 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C35969%2C1731522727077, suffix=, logDir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/WALs/39e84130bbc9,35969,1731522727077, archiveDir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/oldWALs, maxLogs=32 2024-11-13T18:32:07,667 INFO [RS:0;39e84130bbc9:35969 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C35969%2C1731522727077.1731522727667 2024-11-13T18:32:07,676 INFO [RS:0;39e84130bbc9:35969 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/WALs/39e84130bbc9,35969,1731522727077/39e84130bbc9%2C35969%2C1731522727077.1731522727667 2024-11-13T18:32:07,677 DEBUG [RS:0;39e84130bbc9:35969 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45467:45467),(127.0.0.1/127.0.0.1:44513:44513)] 2024-11-13T18:32:07,826 DEBUG [39e84130bbc9:40635 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T18:32:07,827 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39e84130bbc9,35969,1731522727077 2024-11-13T18:32:07,829 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,35969,1731522727077, state=OPENING 2024-11-13T18:32:07,831 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T18:32:07,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:07,835 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:07,835 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:07,835 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:32:07,835 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,35969,1731522727077}] 2024-11-13T18:32:07,990 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T18:32:07,992 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34065, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T18:32:07,997 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T18:32:07,998 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:08,000 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C35969%2C1731522727077.meta, suffix=.meta, logDir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/WALs/39e84130bbc9,35969,1731522727077, archiveDir=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/oldWALs, maxLogs=32 2024-11-13T18:32:08,002 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C35969%2C1731522727077.meta.1731522728002.meta 2024-11-13T18:32:08,011 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/WALs/39e84130bbc9,35969,1731522727077/39e84130bbc9%2C35969%2C1731522727077.meta.1731522728002.meta 2024-11-13T18:32:08,012 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45467:45467),(127.0.0.1/127.0.0.1:44513:44513)] 2024-11-13T18:32:08,013 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:32:08,013 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T18:32:08,013 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T18:32:08,014 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T18:32:08,014 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T18:32:08,014 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:08,014 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T18:32:08,014 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T18:32:08,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:32:08,021 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:32:08,021 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:08,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:08,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:32:08,024 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:32:08,024 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:08,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:08,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:32:08,026 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:32:08,026 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:08,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:08,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:32:08,028 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:32:08,028 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:08,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T18:32:08,029 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:32:08,030 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740 2024-11-13T18:32:08,031 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740 2024-11-13T18:32:08,034 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:32:08,034 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:32:08,034 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:32:08,036 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:32:08,037 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709865, jitterRate=-0.09736104309558868}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:32:08,037 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T18:32:08,038 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731522728014Writing region info on filesystem at 1731522728014Initializing all the Stores at 1731522728016 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522728016Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522728017 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522728017Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522728017Cleaning up temporary data from old regions at 1731522728034 (+17 ms)Running coprocessor post-open hooks at 1731522728037 (+3 ms)Region opened successfully at 1731522728038 (+1 ms) 2024-11-13T18:32:08,040 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731522727989 2024-11-13T18:32:08,043 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T18:32:08,043 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T18:32:08,044 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,35969,1731522727077 2024-11-13T18:32:08,046 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,35969,1731522727077, state=OPEN 2024-11-13T18:32:08,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:32:08,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:32:08,347 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:08,347 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:08,347 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39e84130bbc9,35969,1731522727077 2024-11-13T18:32:08,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T18:32:08,352 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,35969,1731522727077 in 512 msec 2024-11-13T18:32:08,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T18:32:08,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 932 msec 2024-11-13T18:32:08,357 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:08,357 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T18:32:08,359 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:32:08,359 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,35969,1731522727077, seqNum=-1] 2024-11-13T18:32:08,360 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:32:08,363 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44973, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:32:08,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0210 sec 2024-11-13T18:32:08,373 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731522728373, completionTime=-1 2024-11-13T18:32:08,373 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T18:32:08,373 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T18:32:08,376 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T18:32:08,376 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731522788376 2024-11-13T18:32:08,376 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731522848376 2024-11-13T18:32:08,376 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T18:32:08,376 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40635,1731522727019-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:08,376 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40635,1731522727019-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:08,376 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40635,1731522727019-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:08,376 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39e84130bbc9:40635, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:08,376 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:08,377 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:08,379 DEBUG [master/39e84130bbc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T18:32:08,383 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.247sec 2024-11-13T18:32:08,383 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T18:32:08,383 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T18:32:08,383 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T18:32:08,383 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T18:32:08,383 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T18:32:08,383 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40635,1731522727019-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:32:08,383 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40635,1731522727019-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T18:32:08,387 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T18:32:08,387 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T18:32:08,387 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40635,1731522727019-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
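The block of "Chore ScheduledChore name=..., period=..., unit=... is enabled." entries above comes from the master registering its periodic maintenance tasks with its ChoreService. A hedged sketch of that ScheduledChore/ChoreService pattern follows; the chore name, period, and Stoppable here are illustrative, not the master's actual wiring:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws Exception {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            // periodic work; the master's ClusterStatusChore, BalancerChore, HbckChore, etc. have this shape
            System.out.println("chore tick");
          }
        };
        service.scheduleChore(chore); // emits a "Chore ScheduledChore name=ExampleChore, ... is enabled." line
        Thread.sleep(3000);           // let it run a few times
        service.shutdown();
      }
    }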
2024-11-13T18:32:08,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bf88cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:32:08,415 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39e84130bbc9,40635,-1 for getting cluster id 2024-11-13T18:32:08,416 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T18:32:08,422 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ebb7c5e2-ea17-41e2-a230-3c10a0ab2d71' 2024-11-13T18:32:08,423 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T18:32:08,423 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ebb7c5e2-ea17-41e2-a230-3c10a0ab2d71" 2024-11-13T18:32:08,424 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367dc70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:32:08,424 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39e84130bbc9,40635,-1] 2024-11-13T18:32:08,424 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T18:32:08,437 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:08,439 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59450, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T18:32:08,440 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f14acfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:32:08,441 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:32:08,442 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,35969,1731522727077, seqNum=-1] 2024-11-13T18:32:08,442 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:32:08,444 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51742, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:32:08,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39e84130bbc9,40635,1731522727019 2024-11-13T18:32:08,446 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:08,450 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T18:32:08,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T18:32:08,450 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T18:32:08,450 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:08,451 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:08,451 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:08,451 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T18:32:08,451 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T18:32:08,451 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1469549761, stopped=false 2024-11-13T18:32:08,451 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39e84130bbc9,40635,1731522727019 2024-11-13T18:32:08,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:08,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:08,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:08,585 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:32:08,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:08,586 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
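The call stack above shows the shutdown being driven from AbstractTestLogRolling.tearDown via HBaseTestingUtil.shutdownMiniCluster, right after "set balanceSwitch=false". A hedged JUnit 4 sketch of that teardown shape, not the actual AbstractTestLogRolling source; HBaseTestingUtil#getAdmin is assumed to behave like the HBaseTestingUtility helper of the same name:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;
    import org.junit.After;

    public class LogRollingTearDownSketch {
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        Admin admin = TEST_UTIL.getAdmin();   // shared Admin owned by the test util, not closed here
        admin.balancerSwitch(false, true);    // corresponds to "Client=null/null set balanceSwitch=false"
        TEST_UTIL.shutdownMiniCluster();      // closes the async connection and stops master + region server
      }
    }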
2024-11-13T18:32:08,586 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:08,586 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:08,586 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:08,586 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:08,586 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39e84130bbc9,35969,1731522727077' ***** 2024-11-13T18:32:08,587 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T18:32:08,587 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T18:32:08,587 INFO [RS:0;39e84130bbc9:35969 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T18:32:08,587 INFO [RS:0;39e84130bbc9:35969 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T18:32:08,587 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(959): stopping server 39e84130bbc9,35969,1731522727077 2024-11-13T18:32:08,587 INFO [RS:0;39e84130bbc9:35969 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:32:08,587 INFO [RS:0;39e84130bbc9:35969 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39e84130bbc9:35969. 2024-11-13T18:32:08,587 DEBUG [RS:0;39e84130bbc9:35969 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:08,587 DEBUG [RS:0;39e84130bbc9:35969 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:08,588 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T18:32:08,588 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
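The ZKUtil entries above ("Set watcher on znode that does not yet exist, /hbase/running") rely on ZooKeeper#exists registering a watch even when the node is absent, so the NodeDeleted event for /hbase/running still reaches master and region server. A hedged, stand-alone illustration of that pattern with the plain ZooKeeper client; the quorum string is taken from the log, everything else is illustrative:

    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class RunningZNodeWatch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57898", 30000, event -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            System.out.println("cluster shutdown requested"); // what the servers react to above
          }
        });
        Stat stat = zk.exists("/hbase/running", true); // null stat: node absent, watch is still registered
        System.out.println("running znode present: " + (stat != null));
      }
    }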
2024-11-13T18:32:08,588 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T18:32:08,588 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T18:32:08,588 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T18:32:08,588 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T18:32:08,588 DEBUG [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-13T18:32:08,588 DEBUG [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T18:32:08,588 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:32:08,588 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:32:08,588 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:32:08,588 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:32:08,588 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:32:08,589 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-13T18:32:08,607 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740/.tmp/ns/7f5a3bed676f49da9f35788fd49f6705 is 43, key is default/ns:d/1731522728363/Put/seqid=0 2024-11-13T18:32:08,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741835_1011 (size=5153) 2024-11-13T18:32:08,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741835_1011 (size=5153) 2024-11-13T18:32:08,650 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740/.tmp/ns/7f5a3bed676f49da9f35788fd49f6705 2024-11-13T18:32:08,658 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740/.tmp/ns/7f5a3bed676f49da9f35788fd49f6705 as hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740/ns/7f5a3bed676f49da9f35788fd49f6705 2024-11-13T18:32:08,666 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740/ns/7f5a3bed676f49da9f35788fd49f6705, entries=2, sequenceid=6, filesize=5.0 K 2024-11-13T18:32:08,668 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 80ms, sequenceid=6, compaction requested=false 2024-11-13T18:32:08,668 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T18:32:08,679 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T18:32:08,680 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:32:08,680 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:32:08,680 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522728588Running coprocessor pre-close hooks at 1731522728588Disabling compacts and flushes for region at 1731522728588Disabling writes for close at 1731522728588Obtaining lock to block concurrent updates at 1731522728589 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731522728589Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731522728589Flushing stores of hbase:meta,,1.1588230740 at 1731522728590 (+1 ms)Flushing 1588230740/ns: creating writer at 1731522728590Flushing 1588230740/ns: appending metadata at 1731522728607 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731522728607Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47192ebd: reopening flushed file at 1731522728657 (+50 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 80ms, sequenceid=6, compaction requested=false at 1731522728668 (+11 ms)Writing region close event to WAL at 1731522728674 (+6 ms)Running coprocessor post-close hooks at 1731522728680 (+6 ms)Closed at 1731522728680 2024-11-13T18:32:08,680 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T18:32:08,788 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(976): stopping server 39e84130bbc9,35969,1731522727077; all regions closed. 
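The close sequence above flushes hbase:meta's memstore into a temporary file (.tmp/ns/7f5a3bed676f49da9f35788fd49f6705) and then commits it into the ns store, which is the same path a manual flush takes. A hedged sketch of triggering that flush from a client with the public Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushMetaSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Memstore contents go to a .tmp hfile and are then committed into the store directory,
          // as in the DefaultStoreFlusher / HRegionFileSystem entries above.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }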
2024-11-13T18:32:08,789 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,789 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,790 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,790 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,790 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741834_1010 (size=1152) 2024-11-13T18:32:08,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741834_1010 (size=1152) 2024-11-13T18:32:08,802 DEBUG [RS:0;39e84130bbc9:35969 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/oldWALs 2024-11-13T18:32:08,802 INFO [RS:0;39e84130bbc9:35969 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C35969%2C1731522727077.meta:.meta(num 1731522728002) 2024-11-13T18:32:08,803 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,803 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,803 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,804 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,804 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:08,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741833_1009 (size=93) 2024-11-13T18:32:08,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741833_1009 (size=93) 2024-11-13T18:32:08,813 DEBUG [RS:0;39e84130bbc9:35969 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/oldWALs 2024-11-13T18:32:08,813 INFO [RS:0;39e84130bbc9:35969 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C35969%2C1731522727077:(num 1731522727667) 2024-11-13T18:32:08,813 DEBUG [RS:0;39e84130bbc9:35969 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:08,814 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:32:08,814 INFO [RS:0;39e84130bbc9:35969 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:32:08,814 INFO [RS:0;39e84130bbc9:35969 {}] hbase.ChoreService(370): Chore service for: regionserver/39e84130bbc9:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T18:32:08,814 INFO [RS:0;39e84130bbc9:35969 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:32:08,814 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
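The "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL: FSHLog ..." entries above are the log-rolling machinery this TestLogRolling run exercises. A hedged sketch of forcing a roll from a client; Admin#getRegionServers and Admin#rollWALWriter are assumed to be available as in recent HBase releases:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (ServerName sn : admin.getRegionServers()) {
            admin.rollWALWriter(sn); // the region server closes its current WAL and opens a new one
          }
        }
      }
    }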
2024-11-13T18:32:08,815 INFO [RS:0;39e84130bbc9:35969 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35969 2024-11-13T18:32:08,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:32:08,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39e84130bbc9,35969,1731522727077 2024-11-13T18:32:08,855 INFO [RS:0;39e84130bbc9:35969 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:32:08,863 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39e84130bbc9,35969,1731522727077] 2024-11-13T18:32:08,870 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39e84130bbc9,35969,1731522727077 already deleted, retry=false 2024-11-13T18:32:08,870 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39e84130bbc9,35969,1731522727077 expired; onlineServers=0 2024-11-13T18:32:08,870 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39e84130bbc9,40635,1731522727019' ***** 2024-11-13T18:32:08,870 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T18:32:08,870 INFO [M:0;39e84130bbc9:40635 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:32:08,871 INFO [M:0;39e84130bbc9:40635 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:32:08,871 DEBUG [M:0;39e84130bbc9:40635 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T18:32:08,871 DEBUG [M:0;39e84130bbc9:40635 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T18:32:08,871 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-13T18:32:08,871 INFO [M:0;39e84130bbc9:40635 {}] hbase.ChoreService(370): Chore service for: master/39e84130bbc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T18:32:08,871 INFO [M:0;39e84130bbc9:40635 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:32:08,871 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522727357 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522727357,5,FailOnTimeoutGroup] 2024-11-13T18:32:08,871 DEBUG [M:0;39e84130bbc9:40635 {}] master.HMaster(1795): Stopping service threads 2024-11-13T18:32:08,871 INFO [M:0;39e84130bbc9:40635 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T18:32:08,871 INFO [M:0;39e84130bbc9:40635 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:32:08,872 INFO [M:0;39e84130bbc9:40635 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T18:32:08,872 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
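The "RegionServer ephemeral node deleted, processing expiration" entry above reflects the registration pattern where each region server holds an EPHEMERAL znode under /hbase/rs; when its ZooKeeper session ends the node vanishes and the master's RegionServerTracker reacts. A hedged, generic illustration of that pattern with the plain ZooKeeper client; the /example-rs-member path and empty data are made up, not HBase's actual layout:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralMemberSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57898", 30000, event -> { });
        // A single-level path so no parent znode is needed; HBase uses /hbase/rs/<server-name> instead.
        zk.create("/example-rs-member", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // Closing the session removes the ephemeral node; watchers see NodeDeleted / NodeChildrenChanged,
        // which is what drives the "processing expiration" handling above.
        zk.close();
      }
    }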
2024-11-13T18:32:08,873 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522727356 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522727356,5,FailOnTimeoutGroup] 2024-11-13T18:32:08,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T18:32:08,878 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:08,878 DEBUG [M:0;39e84130bbc9:40635 {}] zookeeper.ZKUtil(347): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T18:32:08,878 WARN [M:0;39e84130bbc9:40635 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T18:32:08,879 INFO [M:0;39e84130bbc9:40635 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/.lastflushedseqids 2024-11-13T18:32:08,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741836_1012 (size=99) 2024-11-13T18:32:08,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741836_1012 (size=99) 2024-11-13T18:32:08,889 INFO [M:0;39e84130bbc9:40635 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T18:32:08,889 INFO [M:0;39e84130bbc9:40635 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T18:32:08,889 DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:32:08,889 INFO [M:0;39e84130bbc9:40635 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:08,889 DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:08,889 DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:32:08,889 DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:32:08,890 INFO [M:0;39e84130bbc9:40635 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-13T18:32:08,919 DEBUG [M:0;39e84130bbc9:40635 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5608d01efe6944d7b0647b0285d76eb7 is 82, key is hbase:meta,,1/info:regioninfo/1731522728044/Put/seqid=0 2024-11-13T18:32:08,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741837_1013 (size=5672) 2024-11-13T18:32:08,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741837_1013 (size=5672) 2024-11-13T18:32:08,928 INFO [M:0;39e84130bbc9:40635 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5608d01efe6944d7b0647b0285d76eb7 2024-11-13T18:32:08,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:08,963 INFO [RS:0;39e84130bbc9:35969 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:32:08,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35969-0x100ed5f00f90001, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:08,963 INFO [RS:0;39e84130bbc9:35969 {}] regionserver.HRegionServer(1031): Exiting; stopping=39e84130bbc9,35969,1731522727077; zookeeper connection closed. 
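The flush above writes the hbase:meta,,1/info:regioninfo cell of the master's local store; the same info:regioninfo family and qualifier layout is what clients read from the hbase:meta catalog table itself. A hedged sketch of scanning that column; parsing the serialized RegionInfo is omitted to keep it short:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRegionInfoScan {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Scan scan = new Scan().addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"));
          try (ResultScanner scanner = meta.getScanner(scan)) {
            for (Result r : scanner) {
              byte[] serialized = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"));
              System.out.println(Bytes.toStringBinary(r.getRow()) + " -> " + serialized.length + " bytes");
            }
          }
        }
      }
    }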
2024-11-13T18:32:08,964 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2bfc94dc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2bfc94dc 2024-11-13T18:32:08,965 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T18:32:08,968 DEBUG [M:0;39e84130bbc9:40635 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/78c5a5cc2e76420c9f9e8e6ac3706150 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731522728371/Put/seqid=0 2024-11-13T18:32:08,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741838_1014 (size=5275) 2024-11-13T18:32:08,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741838_1014 (size=5275) 2024-11-13T18:32:08,976 INFO [M:0;39e84130bbc9:40635 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/78c5a5cc2e76420c9f9e8e6ac3706150 2024-11-13T18:32:09,006 DEBUG [M:0;39e84130bbc9:40635 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/695c5dfbf8324a6f9dd9334563a2cf60 is 69, key is 39e84130bbc9,35969,1731522727077/rs:state/1731522727480/Put/seqid=0 2024-11-13T18:32:09,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741839_1015 (size=5156) 2024-11-13T18:32:09,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741839_1015 (size=5156) 2024-11-13T18:32:09,017 INFO [M:0;39e84130bbc9:40635 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/695c5dfbf8324a6f9dd9334563a2cf60 2024-11-13T18:32:09,045 DEBUG [M:0;39e84130bbc9:40635 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/34a6af9f03914b34a0d25636f5c2c147 is 52, key is load_balancer_on/state:d/1731522728448/Put/seqid=0 2024-11-13T18:32:09,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39855 is added to blk_1073741840_1016 (size=5056) 2024-11-13T18:32:09,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741840_1016 (size=5056) 2024-11-13T18:32:09,054 INFO [M:0;39e84130bbc9:40635 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), 
to=hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/34a6af9f03914b34a0d25636f5c2c147 2024-11-13T18:32:09,062 DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5608d01efe6944d7b0647b0285d76eb7 as hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5608d01efe6944d7b0647b0285d76eb7 2024-11-13T18:32:09,070 INFO [M:0;39e84130bbc9:40635 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5608d01efe6944d7b0647b0285d76eb7, entries=8, sequenceid=29, filesize=5.5 K 2024-11-13T18:32:09,071 DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/78c5a5cc2e76420c9f9e8e6ac3706150 as hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/78c5a5cc2e76420c9f9e8e6ac3706150 2024-11-13T18:32:09,078 INFO [M:0;39e84130bbc9:40635 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/78c5a5cc2e76420c9f9e8e6ac3706150, entries=3, sequenceid=29, filesize=5.2 K 2024-11-13T18:32:09,079 DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/695c5dfbf8324a6f9dd9334563a2cf60 as hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/695c5dfbf8324a6f9dd9334563a2cf60 2024-11-13T18:32:09,087 INFO [M:0;39e84130bbc9:40635 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/695c5dfbf8324a6f9dd9334563a2cf60, entries=1, sequenceid=29, filesize=5.0 K 2024-11-13T18:32:09,088 DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/34a6af9f03914b34a0d25636f5c2c147 as hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/34a6af9f03914b34a0d25636f5c2c147 2024-11-13T18:32:09,097 INFO [M:0;39e84130bbc9:40635 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:32945/user/jenkins/test-data/5ed88e6d-bdeb-dc9b-fad8-d2c06e94dc53/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/34a6af9f03914b34a0d25636f5c2c147, entries=1, sequenceid=29, filesize=4.9 K 2024-11-13T18:32:09,098 INFO [M:0;39e84130bbc9:40635 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 
KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 208ms, sequenceid=29, compaction requested=false 2024-11-13T18:32:09,100 INFO [M:0;39e84130bbc9:40635 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:09,101 DEBUG [M:0;39e84130bbc9:40635 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522728889Disabling compacts and flushes for region at 1731522728889Disabling writes for close at 1731522728889Obtaining lock to block concurrent updates at 1731522728890 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731522728890Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731522728890Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731522728892 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731522728892Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731522728918 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731522728918Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731522728940 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731522728967 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731522728967Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731522728983 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731522729005 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731522729005Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731522729025 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731522729044 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731522729044Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25793cd7: reopening flushed file at 1731522729060 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40c0bb75: reopening flushed file at 1731522729070 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7352693e: reopening flushed file at 1731522729078 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@469b0557: reopening flushed file at 1731522729087 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 208ms, sequenceid=29, compaction requested=false at 1731522729099 (+12 ms)Writing region close event to WAL at 1731522729100 (+1 ms)Closed at 1731522729100 2024-11-13T18:32:09,101 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:09,101 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:09,102 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:09,102 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:09,102 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:09,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44185 is added to blk_1073741830_1006 (size=10311) 2024-11-13T18:32:09,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39855 is added to blk_1073741830_1006 (size=10311) 2024-11-13T18:32:09,107 INFO [M:0;39e84130bbc9:40635 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T18:32:09,107 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T18:32:09,107 INFO [M:0;39e84130bbc9:40635 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40635 2024-11-13T18:32:09,107 INFO [M:0;39e84130bbc9:40635 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:32:09,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:09,211 INFO [M:0;39e84130bbc9:40635 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:32:09,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40635-0x100ed5f00f90000, quorum=127.0.0.1:57898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:09,222 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cebd4b6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:09,223 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e6ff2f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:09,223 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:09,223 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18f27499{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:09,223 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d944f53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:09,225 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
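Once the datanode web handlers and RPC servers above are stopped, the test utility reports "Minicluster is down" and, as the entries below show, immediately starts a fresh cluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}. A hedged sketch of that startup call; the builder method names mirror the option's toString below and may differ in other HBase versions (some 3.x code uses StartTestingClusterOption instead):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterRestartSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);  // brings up DFS, ZooKeeper, master and region server
        // ... exercise the cluster, e.g. via util.getConnection() ...
        util.shutdownMiniCluster();
      }
    }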
2024-11-13T18:32:09,225 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:09,226 WARN [BP-1847718472-172.17.0.3-1731522725992 heartbeating to localhost/127.0.0.1:32945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:09,226 WARN [BP-1847718472-172.17.0.3-1731522725992 heartbeating to localhost/127.0.0.1:32945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1847718472-172.17.0.3-1731522725992 (Datanode Uuid 9f3aa70d-8758-43f6-8b9d-2a7e1a07093b) service to localhost/127.0.0.1:32945 2024-11-13T18:32:09,226 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/data/data3/current/BP-1847718472-172.17.0.3-1731522725992 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:09,227 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/data/data4/current/BP-1847718472-172.17.0.3-1731522725992 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:09,227 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:09,229 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61e52b83{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:09,230 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29a18ee0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:09,230 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:09,230 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@670e4080{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:09,230 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@607b9bc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:09,232 WARN [BP-1847718472-172.17.0.3-1731522725992 heartbeating to localhost/127.0.0.1:32945 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:09,232 WARN [BP-1847718472-172.17.0.3-1731522725992 heartbeating to localhost/127.0.0.1:32945 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1847718472-172.17.0.3-1731522725992 (Datanode Uuid dcb7f8e4-3eb0-48d1-bd51-2252d63c0792) service to localhost/127.0.0.1:32945 2024-11-13T18:32:09,232 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/data/data1/current/BP-1847718472-172.17.0.3-1731522725992 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:09,233 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/cluster_d2848141-1259-42d8-2368-acbfa81372d1/data/data2/current/BP-1847718472-172.17.0.3-1731522725992 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:09,233 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T18:32:09,233 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:09,233 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:09,240 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3235d5ba{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:32:09,241 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@347af0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:09,241 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:09,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a69944b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:09,242 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3c3ceb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:09,253 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T18:32:09,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T18:32:09,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T18:32:09,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.log.dir so I do NOT create it in target/test-data/ad8670c4-e599-72f2-a098-d9558c868335 2024-11-13T18:32:09,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8282a431-71fc-eed2-eadd-6f1b6334442e/hadoop.tmp.dir so I do NOT create it in target/test-data/ad8670c4-e599-72f2-a098-d9558c868335 2024-11-13T18:32:09,275 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06, deleteOnExit=true 2024-11-13T18:32:09,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T18:32:09,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/test.cache.data in system properties and HBase conf 2024-11-13T18:32:09,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T18:32:09,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir in system properties and HBase conf 2024-11-13T18:32:09,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T18:32:09,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T18:32:09,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T18:32:09,276 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T18:32:09,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:32:09,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:32:09,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T18:32:09,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:32:09,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T18:32:09,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T18:32:09,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:32:09,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:32:09,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T18:32:09,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/nfs.dump.dir in system properties and HBase conf 2024-11-13T18:32:09,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/java.io.tmpdir in system properties and HBase conf 2024-11-13T18:32:09,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:32:09,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T18:32:09,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T18:32:09,293 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:32:09,380 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:09,388 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:09,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:09,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:09,396 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:32:09,397 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:09,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17c48ca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:09,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35c95cb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:09,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5519c514{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/java.io.tmpdir/jetty-localhost-33515-hadoop-hdfs-3_4_1-tests_jar-_-any-16985276829888520049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:32:09,523 INFO [regionserver/39e84130bbc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:32:09,524 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47f2ada2{HTTP/1.1, (http/1.1)}{localhost:33515} 2024-11-13T18:32:09,524 INFO [Time-limited test {}] server.Server(415): Started @105574ms 2024-11-13T18:32:09,539 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:32:09,633 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:09,637 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:09,642 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:09,642 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:09,642 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:32:09,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c3d2a60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:09,643 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5917cb43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:09,770 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1aa07d80{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/java.io.tmpdir/jetty-localhost-33997-hadoop-hdfs-3_4_1-tests_jar-_-any-2695604619510153567/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:09,771 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c814f59{HTTP/1.1, (http/1.1)}{localhost:33997} 2024-11-13T18:32:09,771 INFO [Time-limited test {}] server.Server(415): Started @105822ms 2024-11-13T18:32:09,773 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:32:09,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:32:09,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T18:32:09,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T18:32:09,796 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-13T18:32:09,828 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:09,833 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:09,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:09,834 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:09,834 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:32:09,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198c3788{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:09,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6471b09b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:09,883 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data1/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:09,883 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data2/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:09,916 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:32:09,920 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7afe2fa10c2956f with lease ID 0x910f12e34a83dc9: Processing first storage report for DS-041f18ee-f73b-44ec-940b-f43c5f131700 from datanode DatanodeRegistration(127.0.0.1:41943, datanodeUuid=ce824297-27bf-412e-9949-12043232fc2b, infoPort=45635, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:09,920 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7afe2fa10c2956f with lease ID 0x910f12e34a83dc9: from storage DS-041f18ee-f73b-44ec-940b-f43c5f131700 node DatanodeRegistration(127.0.0.1:41943, datanodeUuid=ce824297-27bf-412e-9949-12043232fc2b, infoPort=45635, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:32:09,920 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7afe2fa10c2956f with lease ID 0x910f12e34a83dc9: Processing first storage report for DS-d54aad6b-7727-4bf7-a3ad-46ff588c1dbd from datanode DatanodeRegistration(127.0.0.1:41943, datanodeUuid=ce824297-27bf-412e-9949-12043232fc2b, infoPort=45635, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:09,920 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7afe2fa10c2956f with lease ID 0x910f12e34a83dc9: from storage DS-d54aad6b-7727-4bf7-a3ad-46ff588c1dbd node DatanodeRegistration(127.0.0.1:41943, datanodeUuid=ce824297-27bf-412e-9949-12043232fc2b, infoPort=45635, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:09,984 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55c8142a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/java.io.tmpdir/jetty-localhost-34827-hadoop-hdfs-3_4_1-tests_jar-_-any-15282845121579979951/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:09,985 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65f2c48f{HTTP/1.1, (http/1.1)}{localhost:34827} 2024-11-13T18:32:09,985 INFO [Time-limited test {}] server.Server(415): Started @106035ms 2024-11-13T18:32:09,986 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T18:32:10,078 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data4/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:10,078 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data3/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:10,100 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:32:10,104 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3325ebff6f189423 with lease ID 0x910f12e34a83dca: Processing first storage report for DS-086cb97f-6415-4953-a114-670ffd0d120a from datanode DatanodeRegistration(127.0.0.1:39111, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=42839, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:10,104 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3325ebff6f189423 with lease ID 0x910f12e34a83dca: from storage DS-086cb97f-6415-4953-a114-670ffd0d120a node DatanodeRegistration(127.0.0.1:39111, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=42839, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:32:10,104 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3325ebff6f189423 with lease ID 0x910f12e34a83dca: Processing first storage report for DS-babbce77-2d5c-44b8-b904-19e327a01341 from datanode DatanodeRegistration(127.0.0.1:39111, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=42839, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:10,104 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3325ebff6f189423 with lease ID 0x910f12e34a83dca: from storage DS-babbce77-2d5c-44b8-b904-19e327a01341 node DatanodeRegistration(127.0.0.1:39111, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=42839, infoSecurePort=0, ipcPort=36925, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:10,115 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335 2024-11-13T18:32:10,120 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/zookeeper_0, clientPort=59783, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T18:32:10,121 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59783 2024-11-13T18:32:10,121 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:10,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:10,124 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:10,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:10,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:32:10,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39111 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:32:10,151 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e with version=8 2024-11-13T18:32:10,151 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/hbase-staging 2024-11-13T18:32:10,154 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:32:10,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:10,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:10,154 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:32:10,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:10,154 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 
2024-11-13T18:32:10,154 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T18:32:10,155 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:32:10,156 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:32899 2024-11-13T18:32:10,158 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32899 connecting to ZooKeeper ensemble=127.0.0.1:59783 2024-11-13T18:32:10,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:328990x0, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:32:10,167 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32899-0x100ed5f0d300000 connected 2024-11-13T18:32:10,198 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:10,200 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:10,203 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:10,203 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e, hbase.cluster.distributed=false 2024-11-13T18:32:10,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:32:10,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32899 2024-11-13T18:32:10,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32899 2024-11-13T18:32:10,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32899 2024-11-13T18:32:10,218 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32899 2024-11-13T18:32:10,219 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32899 2024-11-13T18:32:10,238 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:32:10,238 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:10,238 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, 
handlerCount=3 2024-11-13T18:32:10,239 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:32:10,239 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:10,239 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:32:10,239 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T18:32:10,239 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:32:10,240 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40543 2024-11-13T18:32:10,242 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40543 connecting to ZooKeeper ensemble=127.0.0.1:59783 2024-11-13T18:32:10,242 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:10,245 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:10,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405430x0, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:32:10,250 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:405430x0, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:10,250 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40543-0x100ed5f0d300001 connected 2024-11-13T18:32:10,250 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T18:32:10,251 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T18:32:10,252 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T18:32:10,253 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:32:10,254 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40543 2024-11-13T18:32:10,254 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40543 2024-11-13T18:32:10,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, 
port=40543 2024-11-13T18:32:10,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40543 2024-11-13T18:32:10,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40543 2024-11-13T18:32:10,271 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39e84130bbc9:32899 2024-11-13T18:32:10,272 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39e84130bbc9,32899,1731522730153 2024-11-13T18:32:10,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:10,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:10,275 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39e84130bbc9,32899,1731522730153 2024-11-13T18:32:10,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:10,278 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T18:32:10,278 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39e84130bbc9,32899,1731522730153 from backup master directory 2024-11-13T18:32:10,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T18:32:10,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:10,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:10,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39e84130bbc9,32899,1731522730153 2024-11-13T18:32:10,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:10,282 WARN [master/39e84130bbc9:0:becomeActiveMaster 
{}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:32:10,282 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39e84130bbc9,32899,1731522730153 2024-11-13T18:32:10,293 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/hbase.id] with ID: 4ddbe52d-2189-4809-8d08-a34688978f1b 2024-11-13T18:32:10,293 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/.tmp/hbase.id 2024-11-13T18:32:10,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39111 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:32:10,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:32:10,302 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/.tmp/hbase.id]:[hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/hbase.id] 2024-11-13T18:32:10,326 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:10,326 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T18:32:10,328 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-13T18:32:10,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:10,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:10,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39111 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:32:10,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:32:10,345 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:32:10,346 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T18:32:10,346 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:10,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:32:10,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39111 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:32:10,646 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T18:32:10,648 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:10,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:10,674 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:10,675 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:10,758 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store 2024-11-13T18:32:10,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39111 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:32:10,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:32:10,768 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:10,768 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:32:10,768 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:32:10,768 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:10,768 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:32:10,768 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:10,768 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:10,768 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522730768Disabling compacts and flushes for region at 1731522730768Disabling writes for close at 1731522730768Writing region close event to WAL at 1731522730768Closed at 1731522730768 2024-11-13T18:32:10,770 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/.initializing 2024-11-13T18:32:10,770 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153 2024-11-13T18:32:10,774 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C32899%2C1731522730153, suffix=, logDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153, archiveDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/oldWALs, maxLogs=10 2024-11-13T18:32:10,774 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C32899%2C1731522730153.1731522730774 2024-11-13T18:32:10,780 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 2024-11-13T18:32:10,785 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42839:42839),(127.0.0.1/127.0.0.1:45635:45635)] 2024-11-13T18:32:10,786 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:32:10,786 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:10,786 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,787 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] 
regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,790 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T18:32:10,790 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:10,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:10,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,792 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T18:32:10,792 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:10,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:10,793 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T18:32:10,796 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:10,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:10,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T18:32:10,799 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:10,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:10,799 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,800 DEBUG 
[master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,800 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,802 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,802 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,803 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T18:32:10,804 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:10,807 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:32:10,808 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706530, jitterRate=-0.10160095989704132}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T18:32:10,809 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731522730787Initializing all the Stores at 1731522730788 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522730788Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522730788Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522730788Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522730788Cleaning up temporary data from old regions at 1731522730802 (+14 ms)Region opened successfully at 1731522730809 (+7 ms) 2024-11-13T18:32:10,809 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T18:32:10,814 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12d2222f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:32:10,815 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T18:32:10,815 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T18:32:10,815 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T18:32:10,815 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T18:32:10,816 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T18:32:10,816 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T18:32:10,816 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T18:32:10,818 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
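Two of the figures in the "Opened 1595e783b53d99cd5eef43b6debb2682" entry above can be reproduced from values printed alongside them. The FlushLargeStoresPolicy message says no per-family lower bound is configured for master:store, so it falls back to the flush size divided by the number of families: 134217728 / 4 families (info, proc, rs, state) = 33554432, i.e. the reported flushSizeLowerBound of 32 M. Likewise, dividing desiredMaxFileSize by (1 + jitterRate) gives roughly 786432 bytes (768 KB) for every split-policy line in this section, which suggests the jitter is applied multiplicatively to a single configured maximum file size for this test setup (presumably hbase.hregion.max.filesize). A minimal sketch of that arithmetic, with an illustrative class name:

    public class MasterStoreNumbers {
        public static void main(String[] args) {
            // FlushLargeStoresPolicy fallback: flushSize / number of column families.
            long flushSize = 134_217_728L;   // "Constructor flushSize=134217728" (128 MB)
            int families = 4;                // info, proc, rs, state
            System.out.println(flushSize / families);        // 33554432 -> flushSizeLowerBound (32 M)

            // Split policy: undo the jitter to recover the common base file size.
            long desiredMaxFileSize = 706_530L;              // from ConstantSizeRegionSplitPolicy{...}
            double jitterRate = -0.10160095989704132;
            System.out.printf("%.0f%n", desiredMaxFileSize / (1 + jitterRate));  // ~786432 (768 KB)
        }
    }
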
2024-11-13T18:32:10,819 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T18:32:10,821 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T18:32:10,822 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T18:32:10,823 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T18:32:10,824 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T18:32:10,825 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T18:32:10,825 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T18:32:10,827 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T18:32:10,828 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T18:32:10,829 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T18:32:10,832 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T18:32:10,833 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T18:32:10,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:10,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:10,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:10,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-13T18:32:10,835 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39e84130bbc9,32899,1731522730153, sessionid=0x100ed5f0d300000, setting cluster-up flag (Was=false) 2024-11-13T18:32:10,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:10,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:10,844 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T18:32:10,845 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,32899,1731522730153 2024-11-13T18:32:10,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:10,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:10,859 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T18:32:10,861 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,32899,1731522730153 2024-11-13T18:32:10,863 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T18:32:10,865 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:10,865 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T18:32:10,865 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-13T18:32:10,866 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39e84130bbc9,32899,1731522730153 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T18:32:10,868 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:10,868 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:10,868 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:10,868 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:10,868 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39e84130bbc9:0, corePoolSize=10, maxPoolSize=10 2024-11-13T18:32:10,868 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:10,868 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:32:10,868 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:10,870 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:10,871 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T18:32:10,872 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:10,872 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T18:32:10,873 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731522760873 2024-11-13T18:32:10,873 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T18:32:10,873 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T18:32:10,874 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T18:32:10,874 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T18:32:10,874 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T18:32:10,874 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T18:32:10,874 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:10,874 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T18:32:10,874 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T18:32:10,875 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T18:32:10,875 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T18:32:10,875 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T18:32:10,875 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522730875,5,FailOnTimeoutGroup] 2024-11-13T18:32:10,875 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522730875,5,FailOnTimeoutGroup] 2024-11-13T18:32:10,876 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:10,876 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T18:32:10,876 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:10,876 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:10,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39111 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:32:10,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:32:10,959 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(746): ClusterId : 4ddbe52d-2189-4809-8d08-a34688978f1b 2024-11-13T18:32:10,959 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T18:32:10,963 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T18:32:10,963 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T18:32:10,966 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T18:32:10,966 DEBUG [RS:0;39e84130bbc9:40543 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7576a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:32:10,986 DEBUG [RS:0;39e84130bbc9:40543 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39e84130bbc9:40543 2024-11-13T18:32:10,987 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T18:32:10,987 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T18:32:10,987 DEBUG [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T18:32:10,988 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(2659): reportForDuty to master=39e84130bbc9,32899,1731522730153 with port=40543, startcode=1731522730238 2024-11-13T18:32:10,988 DEBUG [RS:0;39e84130bbc9:40543 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T18:32:10,991 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56027, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T18:32:10,991 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32899 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39e84130bbc9,40543,1731522730238 2024-11-13T18:32:10,992 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32899 {}] master.ServerManager(517): Registering regionserver=39e84130bbc9,40543,1731522730238 2024-11-13T18:32:10,995 DEBUG [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e 2024-11-13T18:32:10,995 DEBUG [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34359 2024-11-13T18:32:10,995 DEBUG [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T18:32:10,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:32:10,999 DEBUG [RS:0;39e84130bbc9:40543 {}] zookeeper.ZKUtil(111): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39e84130bbc9,40543,1731522730238 2024-11-13T18:32:10,999 WARN [RS:0;39e84130bbc9:40543 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:32:10,999 INFO [RS:0;39e84130bbc9:40543 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:11,000 DEBUG [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238 2024-11-13T18:32:11,002 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39e84130bbc9,40543,1731522730238] 2024-11-13T18:32:11,013 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T18:32:11,017 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T18:32:11,017 INFO [RS:0;39e84130bbc9:40543 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T18:32:11,017 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
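The MemStoreFlusher line above reports globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M; 836 is 95% of 880, which matches the stock lower-limit ratio, and 880 M is consistent with the usual 40% global-memstore fraction of a roughly 2.2 GB test heap. A minimal sketch of that relationship, assuming the standard configuration keys (the real computation inside MemStoreFlusher also honours the Offheap=false setting shown above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Defaults: 0.4 of max heap for all memstores, low-water mark at 0.95 of that limit.
            float globalFraction = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
            float lowerRatio = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            long limit = (long) (Runtime.getRuntime().maxMemory() * globalFraction);
            System.out.println("globalMemStoreLimit ~= " + (limit >> 20) + " M");
            System.out.println("lowMark ~= " + ((long) (limit * lowerRatio) >> 20) + " M");
        }
    }
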
2024-11-13T18:32:11,018 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T18:32:11,019 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T18:32:11,019 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,019 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,019 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,019 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,019 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,019 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,020 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:32:11,020 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,020 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,020 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,020 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,020 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,020 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,020 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:32:11,020 DEBUG [RS:0;39e84130bbc9:40543 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:32:11,021 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:11,021 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,021 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,021 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,021 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,021 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40543,1731522730238-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:32:11,038 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T18:32:11,038 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40543,1731522730238-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,038 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,039 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.Replication(171): 39e84130bbc9,40543,1731522730238 started 2024-11-13T18:32:11,057 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,057 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(1482): Serving as 39e84130bbc9,40543,1731522730238, RpcServer on 39e84130bbc9/172.17.0.3:40543, sessionid=0x100ed5f0d300001 2024-11-13T18:32:11,057 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T18:32:11,057 DEBUG [RS:0;39e84130bbc9:40543 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39e84130bbc9,40543,1731522730238 2024-11-13T18:32:11,057 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,40543,1731522730238' 2024-11-13T18:32:11,057 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T18:32:11,058 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T18:32:11,059 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T18:32:11,060 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T18:32:11,060 DEBUG [RS:0;39e84130bbc9:40543 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39e84130bbc9,40543,1731522730238 2024-11-13T18:32:11,060 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,40543,1731522730238' 2024-11-13T18:32:11,060 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T18:32:11,060 DEBUG 
[RS:0;39e84130bbc9:40543 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T18:32:11,061 DEBUG [RS:0;39e84130bbc9:40543 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T18:32:11,061 INFO [RS:0;39e84130bbc9:40543 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T18:32:11,061 INFO [RS:0;39e84130bbc9:40543 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T18:32:11,164 INFO [RS:0;39e84130bbc9:40543 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C40543%2C1731522730238, suffix=, logDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238, archiveDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs, maxLogs=32 2024-11-13T18:32:11,194 INFO [RS:0;39e84130bbc9:40543 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40543%2C1731522730238.1731522731193 2024-11-13T18:32:11,205 INFO [RS:0;39e84130bbc9:40543 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 2024-11-13T18:32:11,211 DEBUG [RS:0;39e84130bbc9:40543 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42839:42839),(127.0.0.1/127.0.0.1:45635:45635)] 2024-11-13T18:32:11,289 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T18:32:11,289 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
regionDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e 2024-11-13T18:32:11,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741833_1009 (size=32) 2024-11-13T18:32:11,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39111 is added to blk_1073741833_1009 (size=32) 2024-11-13T18:32:11,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:11,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:32:11,302 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:32:11,302 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:11,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:11,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:32:11,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:32:11,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:11,305 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:11,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:32:11,307 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:32:11,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:11,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:11,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:32:11,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:32:11,309 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:11,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:11,310 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:32:11,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740 2024-11-13T18:32:11,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740 2024-11-13T18:32:11,313 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:32:11,313 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:32:11,314 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:32:11,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:32:11,318 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:32:11,318 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699490, jitterRate=-0.1105535626411438}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:32:11,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731522731298Initializing all the Stores at 1731522731299 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522731299Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522731299Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522731299Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522731299Cleaning up temporary data from old regions at 1731522731313 (+14 ms)Region opened successfully at 1731522731319 (+6 ms) 2024-11-13T18:32:11,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:32:11,319 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-13T18:32:11,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:32:11,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:32:11,319 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:32:11,320 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:32:11,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522731319Disabling compacts and flushes for region at 1731522731319Disabling writes for close at 1731522731319Writing region close event to WAL at 1731522731320 (+1 ms)Closed at 1731522731320 2024-11-13T18:32:11,322 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:11,322 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T18:32:11,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T18:32:11,324 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:32:11,325 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T18:32:11,476 DEBUG [39e84130bbc9:32899 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T18:32:11,476 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39e84130bbc9,40543,1731522730238 2024-11-13T18:32:11,478 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,40543,1731522730238, state=OPENING 2024-11-13T18:32:11,480 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T18:32:11,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:11,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:11,485 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:11,485 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:11,485 DEBUG [PEWorker-3 {}] 
procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:32:11,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,40543,1731522730238}] 2024-11-13T18:32:11,640 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T18:32:11,642 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49953, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T18:32:11,646 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T18:32:11,647 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:11,649 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C40543%2C1731522730238.meta, suffix=.meta, logDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238, archiveDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs, maxLogs=32 2024-11-13T18:32:11,650 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta 2024-11-13T18:32:11,656 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta 2024-11-13T18:32:11,662 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42839:42839),(127.0.0.1/127.0.0.1:45635:45635)] 2024-11-13T18:32:11,667 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:32:11,668 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T18:32:11,668 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T18:32:11,668 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
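The meta WAL created above is named from the URL-encoded server name (39e84130bbc9,40543,1731522730238, with commas escaped as %2C), a .meta suffix, and the file's creation time in epoch milliseconds; the same millisecond values appear throughout the "Region open journal" entries. Converting 1731522731650 back gives 2024-11-13T18:32:11.650Z, which matches the timestamp of the line that created the file, so the timestamps in this log read as UTC. A small self-contained check:

    import java.net.URLDecoder;
    import java.nio.charset.StandardCharsets;
    import java.time.Instant;

    public class WalNameCheck {
        public static void main(String[] args) {
            String wal = "39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta";
            // Undo the %2C escaping used in the WAL file name.
            System.out.println(URLDecoder.decode(wal, StandardCharsets.UTF_8));
            // The trailing number before ".meta" is epoch milliseconds.
            System.out.println(Instant.ofEpochMilli(1731522731650L));  // 2024-11-13T18:32:11.650Z
        }
    }
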
2024-11-13T18:32:11,668 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T18:32:11,668 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:11,668 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T18:32:11,668 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T18:32:11,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:32:11,671 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:32:11,671 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:11,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:11,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:32:11,673 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:32:11,673 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:11,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:11,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:32:11,674 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:32:11,674 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:11,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:11,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:32:11,676 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:32:11,676 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:11,676 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T18:32:11,676 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:32:11,677 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740 2024-11-13T18:32:11,678 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740 2024-11-13T18:32:11,680 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:32:11,680 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:32:11,680 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:32:11,682 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:32:11,683 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854587, jitterRate=0.0866643637418747}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:32:11,683 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T18:32:11,683 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731522731668Writing region info on filesystem at 1731522731668Initializing all the Stores at 1731522731669 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522731669Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522731670 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522731670Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522731670Cleaning up temporary data from old regions at 1731522731680 (+10 ms)Running coprocessor post-open hooks at 1731522731683 (+3 ms)Region opened successfully at 1731522731683 2024-11-13T18:32:11,685 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731522731639 2024-11-13T18:32:11,688 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T18:32:11,688 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T18:32:11,689 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,40543,1731522730238 2024-11-13T18:32:11,690 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,40543,1731522730238, state=OPEN 2024-11-13T18:32:11,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:32:11,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:32:11,697 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39e84130bbc9,40543,1731522730238 2024-11-13T18:32:11,697 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:11,697 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:11,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T18:32:11,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,40543,1731522730238 in 212 msec 2024-11-13T18:32:11,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T18:32:11,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 379 msec 2024-11-13T18:32:11,706 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:11,706 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T18:32:11,707 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:32:11,708 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,40543,1731522730238, seqNum=-1] 2024-11-13T18:32:11,708 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:32:11,710 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49527, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:32:11,716 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 851 msec 2024-11-13T18:32:11,716 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731522731716, completionTime=-1 2024-11-13T18:32:11,716 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T18:32:11,716 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T18:32:11,719 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T18:32:11,719 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731522791719 2024-11-13T18:32:11,719 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731522851719 2024-11-13T18:32:11,719 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T18:32:11,719 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,32899,1731522730153-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,719 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,32899,1731522730153-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,719 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,32899,1731522730153-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,719 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39e84130bbc9:32899, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:11,720 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,720 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,722 DEBUG [master/39e84130bbc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T18:32:11,724 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.442sec 2024-11-13T18:32:11,724 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T18:32:11,724 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T18:32:11,724 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T18:32:11,724 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T18:32:11,724 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T18:32:11,724 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,32899,1731522730153-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:32:11,724 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,32899,1731522730153-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T18:32:11,736 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T18:32:11,736 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T18:32:11,736 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,32899,1731522730153-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:11,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c7ae56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:32:11,759 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39e84130bbc9,32899,-1 for getting cluster id 2024-11-13T18:32:11,759 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T18:32:11,765 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4ddbe52d-2189-4809-8d08-a34688978f1b' 2024-11-13T18:32:11,766 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T18:32:11,766 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4ddbe52d-2189-4809-8d08-a34688978f1b" 2024-11-13T18:32:11,767 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d65fa4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:32:11,767 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39e84130bbc9,32899,-1] 2024-11-13T18:32:11,767 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T18:32:11,768 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:11,769 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60912, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T18:32:11,770 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@272348fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:32:11,770 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:32:11,772 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,40543,1731522730238, seqNum=-1] 2024-11-13T18:32:11,772 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:32:11,774 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33302, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:32:11,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39e84130bbc9,32899,1731522730153 2024-11-13T18:32:11,777 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:11,780 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T18:32:11,798 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:32:11,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:11,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:11,798 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:32:11,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:11,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:32:11,798 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T18:32:11,798 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:32:11,799 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38555 2024-11-13T18:32:11,800 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38555 connecting to ZooKeeper ensemble=127.0.0.1:59783 2024-11-13T18:32:11,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:11,803 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:11,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385550x0, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:32:11,808 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38555-0x100ed5f0d300002 connected 2024-11-13T18:32:11,808 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-13T18:32:11,808 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-13T18:32:11,808 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T18:32:11,813 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
2024-11-13T18:32:11,814 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T18:32:11,816 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:32:11,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38555 2024-11-13T18:32:11,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38555 2024-11-13T18:32:11,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38555 2024-11-13T18:32:11,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38555 2024-11-13T18:32:11,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38555 2024-11-13T18:32:11,819 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(746): ClusterId : 4ddbe52d-2189-4809-8d08-a34688978f1b 2024-11-13T18:32:11,819 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T18:32:11,821 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T18:32:11,821 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T18:32:11,823 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T18:32:11,824 DEBUG [RS:1;39e84130bbc9:38555 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c027df9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:32:11,836 DEBUG [RS:1;39e84130bbc9:38555 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;39e84130bbc9:38555 2024-11-13T18:32:11,836 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T18:32:11,836 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T18:32:11,837 DEBUG [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T18:32:11,837 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(2659): reportForDuty to master=39e84130bbc9,32899,1731522730153 with port=38555, startcode=1731522731797 2024-11-13T18:32:11,837 DEBUG [RS:1;39e84130bbc9:38555 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T18:32:11,839 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57151, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T18:32:11,840 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32899 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39e84130bbc9,38555,1731522731797 2024-11-13T18:32:11,840 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32899 {}] master.ServerManager(517): Registering regionserver=39e84130bbc9,38555,1731522731797 2024-11-13T18:32:11,842 DEBUG [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e 2024-11-13T18:32:11,842 DEBUG [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34359 2024-11-13T18:32:11,842 DEBUG [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T18:32:11,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:32:11,844 DEBUG [RS:1;39e84130bbc9:38555 {}] zookeeper.ZKUtil(111): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39e84130bbc9,38555,1731522731797 2024-11-13T18:32:11,844 WARN [RS:1;39e84130bbc9:38555 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:32:11,844 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39e84130bbc9,38555,1731522731797] 2024-11-13T18:32:11,844 INFO [RS:1;39e84130bbc9:38555 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:11,844 DEBUG [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797 2024-11-13T18:32:11,848 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T18:32:11,850 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T18:32:11,852 INFO [RS:1;39e84130bbc9:38555 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T18:32:11,852 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:11,852 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T18:32:11,853 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T18:32:11,853 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:32:11,854 DEBUG [RS:1;39e84130bbc9:38555 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:32:11,855 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T18:32:11,855 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,855 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,855 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,855 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,855 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,38555,1731522731797-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:32:11,871 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T18:32:11,871 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,38555,1731522731797-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,871 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,871 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.Replication(171): 39e84130bbc9,38555,1731522731797 started 2024-11-13T18:32:11,886 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:11,886 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(1482): Serving as 39e84130bbc9,38555,1731522731797, RpcServer on 39e84130bbc9/172.17.0.3:38555, sessionid=0x100ed5f0d300002 2024-11-13T18:32:11,887 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T18:32:11,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;39e84130bbc9:38555,5,FailOnTimeoutGroup] 2024-11-13T18:32:11,887 DEBUG [RS:1;39e84130bbc9:38555 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39e84130bbc9,38555,1731522731797 2024-11-13T18:32:11,887 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,38555,1731522731797' 2024-11-13T18:32:11,887 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T18:32:11,887 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-13T18:32:11,888 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T18:32:11,888 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T18:32:11,888 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T18:32:11,888 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T18:32:11,888 DEBUG [RS:1;39e84130bbc9:38555 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
39e84130bbc9,38555,1731522731797 2024-11-13T18:32:11,888 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,38555,1731522731797' 2024-11-13T18:32:11,889 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T18:32:11,890 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T18:32:11,890 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 39e84130bbc9,32899,1731522730153 2024-11-13T18:32:11,890 DEBUG [RS:1;39e84130bbc9:38555 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T18:32:11,890 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@cb496b3 2024-11-13T18:32:11,890 INFO [RS:1;39e84130bbc9:38555 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T18:32:11,890 INFO [RS:1;39e84130bbc9:38555 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T18:32:11,890 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T18:32:11,893 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60918, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T18:32:11,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32899 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T18:32:11,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32899 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-13T18:32:11,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32899 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:32:11,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32899 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T18:32:11,900 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T18:32:11,900 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:11,901 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32899 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-13T18:32:11,901 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T18:32:11,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T18:32:11,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39111 is added to blk_1073741835_1011 (size=393) 2024-11-13T18:32:11,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741835_1011 (size=393) 2024-11-13T18:32:11,915 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f5dc2c5f6de8efdd32f63218bdb5469d, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e 2024-11-13T18:32:11,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41943 is added to blk_1073741836_1012 (size=76) 2024-11-13T18:32:11,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39111 is added to blk_1073741836_1012 (size=76) 2024-11-13T18:32:11,933 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:11,933 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing f5dc2c5f6de8efdd32f63218bdb5469d, disabling compactions & flushes 2024-11-13T18:32:11,933 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:11,933 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:11,933 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. after waiting 0 ms 2024-11-13T18:32:11,933 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:11,933 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:11,934 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for f5dc2c5f6de8efdd32f63218bdb5469d: Waiting for close lock at 1731522731933Disabling compacts and flushes for region at 1731522731933Disabling writes for close at 1731522731933Writing region close event to WAL at 1731522731933Closed at 1731522731933 2024-11-13T18:32:11,935 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T18:32:11,936 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731522731935"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731522731935"}]},"ts":"1731522731935"} 2024-11-13T18:32:11,939 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-13T18:32:11,941 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T18:32:11,941 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522731941"}]},"ts":"1731522731941"} 2024-11-13T18:32:11,946 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-13T18:32:11,947 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f5dc2c5f6de8efdd32f63218bdb5469d, ASSIGN}] 2024-11-13T18:32:11,949 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f5dc2c5f6de8efdd32f63218bdb5469d, ASSIGN 2024-11-13T18:32:11,951 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f5dc2c5f6de8efdd32f63218bdb5469d, ASSIGN; state=OFFLINE, location=39e84130bbc9,40543,1731522730238; forceNewPlan=false, retain=false 2024-11-13T18:32:11,993 INFO [RS:1;39e84130bbc9:38555 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C38555%2C1731522731797, suffix=, logDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797, archiveDir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs, maxLogs=32 2024-11-13T18:32:11,994 INFO [RS:1;39e84130bbc9:38555 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C38555%2C1731522731797.1731522731994 2024-11-13T18:32:12,001 INFO [RS:1;39e84130bbc9:38555 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 2024-11-13T18:32:12,002 DEBUG [RS:1;39e84130bbc9:38555 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42839:42839),(127.0.0.1/127.0.0.1:45635:45635)] 2024-11-13T18:32:12,101 INFO [39e84130bbc9:32899 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-13T18:32:12,102 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f5dc2c5f6de8efdd32f63218bdb5469d, regionState=OPENING, regionLocation=39e84130bbc9,40543,1731522730238 2024-11-13T18:32:12,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f5dc2c5f6de8efdd32f63218bdb5469d, ASSIGN because future has completed 2024-11-13T18:32:12,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f5dc2c5f6de8efdd32f63218bdb5469d, server=39e84130bbc9,40543,1731522730238}] 2024-11-13T18:32:12,265 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:12,265 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => f5dc2c5f6de8efdd32f63218bdb5469d, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:32:12,266 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,266 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:12,266 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,266 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,268 INFO [StoreOpener-f5dc2c5f6de8efdd32f63218bdb5469d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,269 INFO [StoreOpener-f5dc2c5f6de8efdd32f63218bdb5469d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f5dc2c5f6de8efdd32f63218bdb5469d columnFamilyName info 2024-11-13T18:32:12,269 DEBUG [StoreOpener-f5dc2c5f6de8efdd32f63218bdb5469d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:12,270 INFO [StoreOpener-f5dc2c5f6de8efdd32f63218bdb5469d-1 {}] regionserver.HStore(327): Store=f5dc2c5f6de8efdd32f63218bdb5469d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:12,270 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,271 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,271 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,272 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,272 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,274 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,277 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:32:12,277 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened f5dc2c5f6de8efdd32f63218bdb5469d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800787, jitterRate=0.018254339694976807}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T18:32:12,277 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:12,278 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for f5dc2c5f6de8efdd32f63218bdb5469d: Running coprocessor pre-open hook at 1731522732266Writing region info on filesystem at 1731522732266Initializing all the Stores at 1731522732267 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522732267Cleaning up temporary data from old regions at 1731522732272 (+5 ms)Running coprocessor post-open hooks at 1731522732277 (+5 ms)Region opened successfully at 1731522732278 (+1 ms) 2024-11-13T18:32:12,279 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d., pid=6, masterSystemTime=1731522732260 2024-11-13T18:32:12,283 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:12,283 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:12,284 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=f5dc2c5f6de8efdd32f63218bdb5469d, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,40543,1731522730238 2024-11-13T18:32:12,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure f5dc2c5f6de8efdd32f63218bdb5469d, server=39e84130bbc9,40543,1731522730238 because future has completed 2024-11-13T18:32:12,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T18:32:12,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure f5dc2c5f6de8efdd32f63218bdb5469d, server=39e84130bbc9,40543,1731522730238 in 184 msec 2024-11-13T18:32:12,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T18:32:12,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=f5dc2c5f6de8efdd32f63218bdb5469d, ASSIGN in 347 msec 2024-11-13T18:32:12,299 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T18:32:12,299 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522732299"}]},"ts":"1731522732299"} 2024-11-13T18:32:12,302 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-13T18:32:12,303 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T18:32:12,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 408 msec 2024-11-13T18:32:17,170 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T18:32:17,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:17,197 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:17,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:17,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:17,213 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-13T18:32:19,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T18:32:19,793 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T18:32:19,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T18:32:19,795 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-13T18:32:19,796 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:32:19,796 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T18:32:19,796 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T18:32:19,796 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T18:32:21,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32899 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T18:32:21,954 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: 
CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-13T18:32:21,954 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-13T18:32:21,961 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T18:32:21,961 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:22,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:22,010 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:22,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:22,014 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:22,014 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:32:22,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@219c70cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:22,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75255721{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:22,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22d0350b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/java.io.tmpdir/jetty-localhost-44863-hadoop-hdfs-3_4_1-tests_jar-_-any-202823843257404658/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:22,155 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3cc6081e{HTTP/1.1, (http/1.1)}{localhost:44863} 2024-11-13T18:32:22,156 INFO [Time-limited test {}] server.Server(415): Started @118206ms 2024-11-13T18:32:22,157 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:32:22,273 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:22,278 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:22,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:22,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:22,280 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:32:22,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4fb99827{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:22,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fbc343d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:22,299 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:22,306 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:22,356 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:32:22,360 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x71ab0bdc3724524d with lease ID 0x910f12e34a83dcb: Processing first storage report for DS-f6a03042-8629-4bd6-b4a2-7181de774bbb from datanode DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:22,360 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71ab0bdc3724524d with lease ID 0x910f12e34a83dcb: from storage DS-f6a03042-8629-4bd6-b4a2-7181de774bbb node DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:22,361 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x71ab0bdc3724524d with lease ID 0x910f12e34a83dcb: Processing first storage report for DS-e3a2a1a4-7fb4-4117-ba61-6c6426ba34c2 from datanode DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:22,361 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71ab0bdc3724524d with lease ID 0x910f12e34a83dcb: from storage DS-e3a2a1a4-7fb4-4117-ba61-6c6426ba34c2 node DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:22,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3df1987c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/java.io.tmpdir/jetty-localhost-46483-hadoop-hdfs-3_4_1-tests_jar-_-any-18186121927658526266/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:22,438 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@71bfb2ba{HTTP/1.1, (http/1.1)}{localhost:46483} 2024-11-13T18:32:22,438 INFO [Time-limited test {}] server.Server(415): Started @118489ms 2024-11-13T18:32:22,440 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:32:22,549 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:22,578 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:22,594 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:22,595 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:22,595 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:32:22,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41559526{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:22,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@495a6aea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:22,606 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data7/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:22,606 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data8/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:22,681 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:32:22,691 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6b43f1220bc219c9 with lease ID 0x910f12e34a83dcc: Processing first storage report for DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b from datanode DatanodeRegistration(127.0.0.1:41511, datanodeUuid=9853c4c3-913d-4d6a-a882-6cc28581f29b, infoPort=39507, infoSecurePort=0, ipcPort=44855, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:22,691 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b43f1220bc219c9 with lease ID 0x910f12e34a83dcc: from storage DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b node DatanodeRegistration(127.0.0.1:41511, datanodeUuid=9853c4c3-913d-4d6a-a882-6cc28581f29b, infoPort=39507, infoSecurePort=0, ipcPort=44855, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:22,691 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6b43f1220bc219c9 with lease ID 0x910f12e34a83dcc: Processing first storage report for DS-c94ae65e-72c1-4a70-8a40-5170ec761766 from datanode DatanodeRegistration(127.0.0.1:41511, datanodeUuid=9853c4c3-913d-4d6a-a882-6cc28581f29b, infoPort=39507, infoSecurePort=0, ipcPort=44855, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:22,691 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b43f1220bc219c9 with lease ID 0x910f12e34a83dcc: from storage DS-c94ae65e-72c1-4a70-8a40-5170ec761766 node DatanodeRegistration(127.0.0.1:41511, datanodeUuid=9853c4c3-913d-4d6a-a882-6cc28581f29b, infoPort=39507, infoSecurePort=0, ipcPort=44855, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:22,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f0760d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/java.io.tmpdir/jetty-localhost-42963-hadoop-hdfs-3_4_1-tests_jar-_-any-13202098803575034573/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:22,772 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21f536ea{HTTP/1.1, (http/1.1)}{localhost:42963} 2024-11-13T18:32:22,772 INFO [Time-limited test {}] server.Server(415): Started @118823ms 2024-11-13T18:32:22,774 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T18:32:22,989 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data9/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:22,990 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data10/current/BP-1474286307-172.17.0.3-1731522729313/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:23,038 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:32:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xecf1d601c04b7341 with lease ID 0x910f12e34a83dcd: Processing first storage report for DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa from datanode DatanodeRegistration(127.0.0.1:46239, datanodeUuid=1ff8a346-47ec-4eb2-b82d-1f917cbae548, infoPort=37917, infoSecurePort=0, ipcPort=46553, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xecf1d601c04b7341 with lease ID 0x910f12e34a83dcd: from storage DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa node DatanodeRegistration(127.0.0.1:46239, datanodeUuid=1ff8a346-47ec-4eb2-b82d-1f917cbae548, infoPort=37917, infoSecurePort=0, ipcPort=46553, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:32:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xecf1d601c04b7341 with lease ID 0x910f12e34a83dcd: Processing first storage report for DS-3e2f9e64-4086-4790-ac7b-3361368b8cf1 from datanode DatanodeRegistration(127.0.0.1:46239, datanodeUuid=1ff8a346-47ec-4eb2-b82d-1f917cbae548, infoPort=37917, infoSecurePort=0, ipcPort=46553, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313) 2024-11-13T18:32:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xecf1d601c04b7341 with lease ID 0x910f12e34a83dcd: from storage DS-3e2f9e64-4086-4790-ac7b-3361368b8cf1 node DatanodeRegistration(127.0.0.1:46239, datanodeUuid=1ff8a346-47ec-4eb2-b82d-1f917cbae548, infoPort=37917, infoSecurePort=0, ipcPort=46553, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:23,118 WARN [ResponseProcessor for block BP-1474286307-172.17.0.3-1731522729313:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1474286307-172.17.0.3-1731522729313:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,118 WARN [ResponseProcessor for block BP-1474286307-172.17.0.3-1731522729313:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1474286307-172.17.0.3-1731522729313:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,118 WARN [ResponseProcessor for block BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,119 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta block BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:23,119 WARN [ResponseProcessor for block BP-1474286307-172.17.0.3-1731522729313:blk_1073741832_1008 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1474286307-172.17.0.3-1731522729313:blk_1073741832_1008 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:40448 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39111:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40448 dst: /127.0.0.1:39111 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:23,121 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:37960 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37960 dst: /127.0.0.1:41943 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:32:23,121 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 block BP-1474286307-172.17.0.3-1731522729313:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:23,122 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-824047245_22 at /127.0.0.1:50068 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39111:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50068 dst: /127.0.0.1:39111 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:32:23,122 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 block BP-1474286307-172.17.0.3-1731522729313:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:23,122 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-824047245_22 at /127.0.0.1:49904 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49904 dst: /127.0.0.1:41943 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:23,125 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 block BP-1474286307-172.17.0.3-1731522729313:blk_1073741832_1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741832_1008 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:23,125 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_810822159_22 at /127.0.0.1:40416 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39111:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40416 dst: /127.0.0.1:39111 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:23,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_810822159_22 at /127.0.0.1:37916 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37916 dst: /127.0.0.1:41943 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:23,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:40434 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:39111:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40434 dst: /127.0.0.1:39111 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:23,137 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:37934 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741832_1008] {}] datanode.DataXceiver(331): 127.0.0.1:41943:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37934 dst: /127.0.0.1:41943 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:23,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55c8142a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:23,139 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65f2c48f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:23,140 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:23,140 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6471b09b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:23,140 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@198c3788{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:23,141 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:32:23,141 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:23,145 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:23,145 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1474286307-172.17.0.3-1731522729313 (Datanode Uuid 39bde02c-5246-4f51-9588-c50d21cb7aca) service to localhost/127.0.0.1:34359 2024-11-13T18:32:23,147 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:23,149 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data3/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:23,152 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data4/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:23,157 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@71fa2b9f {}] datanode.DataXceiver(331): 127.0.0.1:41943:DataXceiver error processing unknown operation src: /127.0.0.1:38614 dst: /127.0.0.1:41943 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:23,157 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 block BP-1474286307-172.17.0.3-1731522729313:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,171 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 block BP-1474286307-172.17.0.3-1731522729313:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,180 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta block BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,180 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 block BP-1474286307-172.17.0.3-1731522729313:blk_1073741832_1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741832_1008 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,187 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1aa07d80{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:23,187 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c814f59{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:23,188 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:23,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5917cb43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:23,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c3d2a60{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:23,191 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:23,191 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:32:23,191 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1474286307-172.17.0.3-1731522729313 (Datanode Uuid ce824297-27bf-412e-9949-12043232fc2b) service to localhost/127.0.0.1:34359 2024-11-13T18:32:23,191 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:23,192 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data1/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:23,192 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data2/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:23,193 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:23,198 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d., hostname=39e84130bbc9,40543,1731522730238, seqNum=2] 2024-11-13T18:32:23,201 ERROR [FSHLog-0-hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e-prefix:39e84130bbc9,40543,1731522730238 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,201 WARN [FSHLog-0-hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e-prefix:39e84130bbc9,40543,1731522730238 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,201 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C40543%2C1731522730238:(num 1731522731193) roll requested 2024-11-13T18:32:23,202 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40543%2C1731522730238.1731522743201 2024-11-13T18:32:23,208 WARN [Thread-902 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,209 WARN [Thread-902 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:23,209 WARN [Thread-902 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741838_1018 2024-11-13T18:32:23,212 WARN [Thread-902 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:23,229 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:23,229 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:23,229 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:23,229 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:23,229 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:23,230 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522743201 2024-11-13T18:32:23,233 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,234 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:23,235 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-13T18:32:23,235 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-13T18:32:23,235 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 2024-11-13T18:32:23,239 WARN [IPC Server handler 1 on default port 34359 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741832_1008 2024-11-13T18:32:23,245 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39507:39507),(127.0.0.1/127.0.0.1:37917:37917)] 2024-11-13T18:32:23,245 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 is not closed yet, will try archiving it next time 2024-11-13T18:32:23,265 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 after 11ms 2024-11-13T18:32:23,856 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:24,347 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:25,246 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:25,247 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522743201 2024-11-13T18:32:25,248 WARN [ResponseProcessor for block BP-1474286307-172.17.0.3-1731522729313:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1474286307-172.17.0.3-1731522729313:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
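Editor's note: the RecoverLeaseFSUtils records above ("Recover lease on dfs file ...", "Failed to recover lease, attempt=0 ... after 11ms") reflect a retry loop around HDFS lease recovery on the old WAL file. A minimal sketch of such a loop, assuming a DistributedFileSystem and using arbitrary retry/backoff values rather than the ones HBase actually uses, looks like this:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Rough sketch of the retry loop behind the "Failed to recover lease, attempt=N"
    // messages: keep calling recoverLease until the NameNode reports the file closed.
    public final class LeaseRecoverySketch {
      private LeaseRecoverySketch() {}

      public static boolean recoverLease(FileSystem fs, Path walFile, int maxAttempts)
          throws IOException, InterruptedException {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          if (dfs.recoverLease(walFile)) {      // true once lease recovery has completed
            return true;
          }
          Thread.sleep(1000L * (attempt + 1));  // back off before the next attempt
        }
        return false;
      }
    }
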
2024-11-13T18:32:25,249 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522743201 block BP-1474286307-172.17.0.3-1731522729313:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:25,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:41266 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:41511:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41266 dst: /127.0.0.1:41511 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:32:25,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:53462 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:46239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53462 dst: /127.0.0.1:46239 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:25,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3df1987c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:25,296 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71bfb2ba{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:25,296 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:25,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fbc343d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:25,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4fb99827{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:25,298 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:25,298 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
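Editor's note: the shutdown records above (Jetty contexts stopped, IncrementalBlockReportManager interrupted, "Ending block pool service") correspond to a datanode being stopped mid-test. A minimal sketch of how a MiniDFSCluster-based test might trigger this kind of datanode death follows; the cluster handle, index, and helper class are illustrative assumptions, not the actual TestLogRolling code:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Hypothetical helper: stop one datanode so that write pipelines containing it
    // start failing, forcing the WAL writer to roll onto the remaining nodes.
    public final class DatanodeDeathSimulator {
      private DatanodeDeathSimulator() {}

      public static void killDataNode(MiniDFSCluster cluster, int index) {
        // stopDataNode shuts the datanode down while leaving its storage directories
        // in place, which matches the "Ending block pool service" messages above.
        cluster.stopDataNode(index);
      }
    }
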
2024-11-13T18:32:25,298 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1474286307-172.17.0.3-1731522729313 (Datanode Uuid 9853c4c3-913d-4d6a-a882-6cc28581f29b) service to localhost/127.0.0.1:34359 2024-11-13T18:32:25,298 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:25,299 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data7/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:25,299 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data8/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:25,299 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:25,857 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:26,348 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:27,246 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:27,247 WARN [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]] 2024-11-13T18:32:27,247 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C40543%2C1731522730238:(num 1731522743201) roll requested 2024-11-13T18:32:27,247 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40543%2C1731522730238.1731522747247 2024-11-13T18:32:27,256 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41511 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:27,256 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:27,256 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741840_1022 2024-11-13T18:32:27,256 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:53478 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data10]'}, localName='127.0.0.1:46239', datanodeUuid='1ff8a346-47ec-4eb2-b82d-1f917cbae548', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741840_1022 to mirror 127.0.0.1:41511 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:27,257 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:53478 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T18:32:27,257 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:53478 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:46239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53478 dst: /127.0.0.1:46239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:27,270 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:27,274 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 after 4038ms 2024-11-13T18:32:27,274 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:27,274 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:27,274 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741841_1023 2024-11-13T18:32:27,281 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:27,286 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:27,286 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 
2024-11-13T18:32:27,286 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741842_1024 2024-11-13T18:32:27,287 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:27,307 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:27,307 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:27,308 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:27,308 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:27,309 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:27,309 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522743201 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522747247 2024-11-13T18:32:27,311 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42681:42681),(127.0.0.1/127.0.0.1:37917:37917)] 2024-11-13T18:32:27,311 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 is not closed yet, will try archiving it next time 2024-11-13T18:32:27,311 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522743201 is not closed yet, will try archiving it next time 2024-11-13T18:32:27,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46239 is added to blk_1073741839_1021 (size=2431) 2024-11-13T18:32:27,313 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 is not closed yet, will try archiving it next time 2024-11-13T18:32:27,319 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T18:32:27,858 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
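Editor's note: the roll recorded above is driven by FSHLog's low-replication check ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL."). A hedged sketch of the configuration knobs behind that behaviour follows; the key names are taken from FSHLog's low-replication handling and should be verified against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative only: tune how aggressively FSHLog rolls when the HDFS pipeline
    // loses replicas. Values here are examples, not recommendations.
    public final class WalRollTuning {
      private WalRollTuning() {}

      public static Configuration lowReplicationSensitiveConf() {
        Configuration conf = HBaseConfiguration.create();
        // Request a roll as soon as the live pipeline drops below two replicas,
        // matching the "expecting no less than 2 replicas" message above.
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // Stop requesting rolls after this many consecutive low-replication rolls.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        return conf;
      }
    }
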
2024-11-13T18:32:28,066 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@10cb2a79[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46239, datanodeUuid=1ff8a346-47ec-4eb2-b82d-1f917cbae548, infoPort=37917, infoSecurePort=0, ipcPort=46553, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741839_1021 to 127.0.0.1:39111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:28,348 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:29,311 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:29,326 WARN [ResponseProcessor for block BP-1474286307-172.17.0.3-1731522729313:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1474286307-172.17.0.3-1731522729313:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-1474286307-172.17.0.3-1731522729313:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:29,326 WARN [DataStreamer for file /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522747247 block BP-1474286307-172.17.0.3-1731522729313:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:29,327 WARN [PacketResponder: BP-1474286307-172.17.0.3-1731522729313:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46239] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:29,328 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:33946 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33946 dst: /127.0.0.1:44591 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:29,328 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:53492 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:46239:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53492 dst: /127.0.0.1:46239 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:32:29,342 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f0760d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:29,343 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21f536ea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:29,343 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:29,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@495a6aea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:29,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41559526{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:29,346 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:29,346 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T18:32:29,346 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:29,346 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1474286307-172.17.0.3-1731522729313 (Datanode Uuid 1ff8a346-47ec-4eb2-b82d-1f917cbae548) service to localhost/127.0.0.1:34359 2024-11-13T18:32:29,346 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data9/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:29,346 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data10/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:29,347 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:29,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40543 {}] regionserver.HRegion(8855): Flush requested on f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:29,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5dc2c5f6de8efdd32f63218bdb5469d 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:32:29,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/16ad7cc8fd6740eab12e2396f03aa5c7 is 1080, key is row0002/info:/1731522745301/Put/seqid=0 2024-11-13T18:32:29,385 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:29,386 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:29,386 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741844_1027 2024-11-13T18:32:29,387 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:29,388 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:29,389 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 
2024-11-13T18:32:29,389 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741845_1028 2024-11-13T18:32:29,390 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:29,394 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39111 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:29,394 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:33972 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741846_1029 to mirror 127.0.0.1:39111 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:29,394 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 
2024-11-13T18:32:29,395 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741846_1029 2024-11-13T18:32:29,395 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:33972 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T18:32:29,395 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:33972 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33972 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:29,395 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:29,400 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41943 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:29,400 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:33988 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741847_1030 to mirror 127.0.0.1:41943 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:29,401 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:29,401 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741847_1030 2024-11-13T18:32:29,401 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:33988 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T18:32:29,401 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:33988 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33988 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:29,401 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:29,402 WARN [IPC Server handler 1 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T18:32:29,402 WARN [IPC Server handler 1 on default port 34359 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T18:32:29,402 WARN [IPC Server handler 1 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T18:32:29,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741848_1031 (size=10347) 2024-11-13T18:32:29,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/16ad7cc8fd6740eab12e2396f03aa5c7 2024-11-13T18:32:29,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/16ad7cc8fd6740eab12e2396f03aa5c7 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/16ad7cc8fd6740eab12e2396f03aa5c7 2024-11-13T18:32:29,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/16ad7cc8fd6740eab12e2396f03aa5c7, entries=5, sequenceid=11, filesize=10.1 K 2024-11-13T18:32:29,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for f5dc2c5f6de8efdd32f63218bdb5469d in 477ms, sequenceid=11, compaction requested=false 2024-11-13T18:32:29,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:29,858 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:29,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40543 {}] regionserver.HRegion(8855): Flush requested on f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:29,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5dc2c5f6de8efdd32f63218bdb5469d 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-13T18:32:30,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/885ea5dafd2c41f2bde904f1506fbcd4 is 1080, key is row0007/info:/1731522749360/Put/seqid=0 2024-11-13T18:32:30,003 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:30,003 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:30,004 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741849_1032 2024-11-13T18:32:30,004 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:30,006 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:30,006 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:30,006 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741850_1033 2024-11-13T18:32:30,007 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:30,008 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:30,008 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK], DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:30,008 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741851_1034 2024-11-13T18:32:30,009 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:30,010 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:30,010 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 
2024-11-13T18:32:30,010 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741852_1035 2024-11-13T18:32:30,011 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:30,011 WARN [IPC Server handler 1 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T18:32:30,011 WARN [IPC Server handler 1 on default port 34359 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T18:32:30,011 WARN [IPC Server handler 1 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T18:32:30,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741853_1036 (size=12506) 2024-11-13T18:32:30,349 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:30,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/885ea5dafd2c41f2bde904f1506fbcd4 2024-11-13T18:32:30,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/885ea5dafd2c41f2bde904f1506fbcd4 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/885ea5dafd2c41f2bde904f1506fbcd4 2024-11-13T18:32:30,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/885ea5dafd2c41f2bde904f1506fbcd4, entries=7, sequenceid=24, filesize=12.2 K 2024-11-13T18:32:30,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for f5dc2c5f6de8efdd32f63218bdb5469d in 445ms, sequenceid=24, compaction requested=false 2024-11-13T18:32:30,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:30,439 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-13T18:32:30,439 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:30,439 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/885ea5dafd2c41f2bde904f1506fbcd4 because midkey is the same as first or last row 2024-11-13T18:32:31,312 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,312 WARN [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]] 2024-11-13T18:32:31,312 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C40543%2C1731522730238:(num 1731522747247) roll requested 2024-11-13T18:32:31,313 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40543%2C1731522730238.1731522751312 2024-11-13T18:32:31,316 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,317 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:31,317 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741854_1037 2024-11-13T18:32:31,317 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:31,319 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:31,319 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:31,319 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741855_1038 2024-11-13T18:32:31,320 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:31,321 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,322 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:31,322 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741856_1039 2024-11-13T18:32:31,322 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:31,324 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,326 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:31,326 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741857_1040 2024-11-13T18:32:31,327 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:31,328 WARN [IPC Server handler 4 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T18:32:31,328 WARN [IPC Server handler 4 on default port 34359 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T18:32:31,328 WARN [IPC Server handler 4 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T18:32:31,335 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:31,335 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:31,335 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:31,335 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:31,336 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:31,336 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522747247 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522751312 2024-11-13T18:32:31,337 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42681:42681)] 2024-11-13T18:32:31,337 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 is not closed yet, will try archiving it next time 2024-11-13T18:32:31,337 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522747247 is not closed yet, will try archiving it next time 2024-11-13T18:32:31,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741843_1026 (size=25992) 2024-11-13T18:32:31,342 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522743201 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs/39e84130bbc9%2C40543%2C1731522730238.1731522743201 2024-11-13T18:32:31,367 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@206b275f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741848_1031 to 127.0.0.1:41511 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:31,367 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2eaccd22[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741853_1036 to 127.0.0.1:41943 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:31,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40543 {}] regionserver.HRegion(8855): Flush requested on f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:31,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5dc2c5f6de8efdd32f63218bdb5469d 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T18:32:31,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/3a974e1b51244c46ba5683191a8af456 is 1079, key is tmprow/info:/1731522751426/Put/seqid=0 2024-11-13T18:32:31,448 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,448 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:31,448 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741859_1042 2024-11-13T18:32:31,449 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:31,451 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,451 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:31,451 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741860_1043 2024-11-13T18:32:31,452 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:31,460 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41943 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,460 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 
2024-11-13T18:32:31,460 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741861_1044 2024-11-13T18:32:31,461 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:31,460 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:34042 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741861_1044 to mirror 127.0.0.1:41943 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:31,462 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:34042 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T18:32:31,462 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:34042 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34042 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:31,464 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,464 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 
2024-11-13T18:32:31,464 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741862_1045 2024-11-13T18:32:31,465 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:31,466 WARN [IPC Server handler 0 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T18:32:31,466 WARN [IPC Server handler 0 on default port 34359 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T18:32:31,466 WARN [IPC Server handler 0 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T18:32:31,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741863_1046 (size=6027) 2024-11-13T18:32:31,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/3a974e1b51244c46ba5683191a8af456 2024-11-13T18:32:31,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/3a974e1b51244c46ba5683191a8af456 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3a974e1b51244c46ba5683191a8af456 2024-11-13T18:32:31,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3a974e1b51244c46ba5683191a8af456, entries=1, sequenceid=34, filesize=5.9 K 2024-11-13T18:32:31,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f5dc2c5f6de8efdd32f63218bdb5469d in 78ms, sequenceid=34, compaction requested=true 2024-11-13T18:32:31,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:31,509 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-13T18:32:31,509 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:31,509 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/885ea5dafd2c41f2bde904f1506fbcd4 because midkey is the same as first or last row 2024-11-13T18:32:31,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5dc2c5f6de8efdd32f63218bdb5469d:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:32:31,511 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:32:31,511 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:32:31,512 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:32:31,512 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HStore(1541): f5dc2c5f6de8efdd32f63218bdb5469d/info is initiating minor compaction (all files) 2024-11-13T18:32:31,512 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5dc2c5f6de8efdd32f63218bdb5469d/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 
2024-11-13T18:32:31,513 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/16ad7cc8fd6740eab12e2396f03aa5c7, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/885ea5dafd2c41f2bde904f1506fbcd4, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3a974e1b51244c46ba5683191a8af456] into tmpdir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp, totalSize=28.2 K 2024-11-13T18:32:31,513 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.Compactor(225): Compacting 16ad7cc8fd6740eab12e2396f03aa5c7, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731522745301 2024-11-13T18:32:31,514 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.Compactor(225): Compacting 885ea5dafd2c41f2bde904f1506fbcd4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731522749360 2024-11-13T18:32:31,514 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3a974e1b51244c46ba5683191a8af456, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731522751426 2024-11-13T18:32:31,549 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5dc2c5f6de8efdd32f63218bdb5469d#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:32:31,550 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/dd45e33678594ae4b565bf1681e92799 is 1080, key is row0002/info:/1731522745301/Put/seqid=0 2024-11-13T18:32:31,552 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:31,553 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:31,553 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741864_1047 2024-11-13T18:32:31,553 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:31,555 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,555 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:31,555 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741865_1048 2024-11-13T18:32:31,556 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:31,557 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,557 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:31,557 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741866_1049 2024-11-13T18:32:31,558 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:31,559 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:31,559 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 
2024-11-13T18:32:31,559 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741867_1050 2024-11-13T18:32:31,560 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:31,561 WARN [IPC Server handler 0 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T18:32:31,561 WARN [IPC Server handler 0 on default port 34359 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T18:32:31,561 WARN [IPC Server handler 0 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T18:32:31,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741868_1051 (size=17994) 2024-11-13T18:32:31,581 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/dd45e33678594ae4b565bf1681e92799 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799 2024-11-13T18:32:31,595 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5dc2c5f6de8efdd32f63218bdb5469d/info of f5dc2c5f6de8efdd32f63218bdb5469d into dd45e33678594ae4b565bf1681e92799(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T18:32:31,595 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:31,595 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d., storeName=f5dc2c5f6de8efdd32f63218bdb5469d/info, priority=13, startTime=1731522751509; duration=0sec 2024-11-13T18:32:31,595 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T18:32:31,595 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:31,595 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799 because midkey is the same as first or last row 2024-11-13T18:32:31,595 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T18:32:31,596 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:31,596 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799 because midkey is the same as first or last row 2024-11-13T18:32:31,596 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T18:32:31,596 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:31,596 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799 because midkey is the same as first or last row 2024-11-13T18:32:31,596 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:32:31,596 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5dc2c5f6de8efdd32f63218bdb5469d:info 2024-11-13T18:32:31,739 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 is not closed yet, will try archiving it next time 2024-11-13T18:32:31,739 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522747247 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs/39e84130bbc9%2C40543%2C1731522730238.1731522747247 2024-11-13T18:32:31,858 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:32,349 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:32,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40543 {}] regionserver.HRegion(8855): Flush requested on f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:32,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5dc2c5f6de8efdd32f63218bdb5469d 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T18:32:32,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/8aaaeaedd30849319e9ce500edb2c442 is 1079, key is tmprow/info:/1731522752854/Put/seqid=0 2024-11-13T18:32:32,878 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:32,878 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:32,878 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741869_1052 2024-11-13T18:32:32,881 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:32,886 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:32,886 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:32,886 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741870_1053 2024-11-13T18:32:32,892 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:32,896 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:32,896 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:32,896 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741871_1054 2024-11-13T18:32:32,897 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:32,914 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:32,915 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 
2024-11-13T18:32:32,916 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741872_1055 2024-11-13T18:32:32,916 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:32,918 WARN [IPC Server handler 4 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T18:32:32,918 WARN [IPC Server handler 4 on default port 34359 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T18:32:32,918 WARN [IPC Server handler 4 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T18:32:32,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741873_1056 (size=6027) 2024-11-13T18:32:32,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/8aaaeaedd30849319e9ce500edb2c442 2024-11-13T18:32:32,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/8aaaeaedd30849319e9ce500edb2c442 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/8aaaeaedd30849319e9ce500edb2c442 2024-11-13T18:32:32,967 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/8aaaeaedd30849319e9ce500edb2c442, entries=1, sequenceid=45, filesize=5.9 K 2024-11-13T18:32:32,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f5dc2c5f6de8efdd32f63218bdb5469d in 112ms, sequenceid=45, compaction requested=false 2024-11-13T18:32:32,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:32,968 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-13T18:32:32,968 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:32,968 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799 because midkey is the same as first or last row 2024-11-13T18:32:33,341 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:33,342 WARN [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]] 2024-11-13T18:32:33,342 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C40543%2C1731522730238:(num 1731522751312) roll requested 2024-11-13T18:32:33,342 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40543%2C1731522730238.1731522753342 2024-11-13T18:32:33,347 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:33,347 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:33,347 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741874_1057 2024-11-13T18:32:33,348 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:33,352 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60770 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741875_1058 to mirror 127.0.0.1:41511 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:33,352 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41511 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:33,352 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60770 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T18:32:33,352 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:33,352 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741875_1058 2024-11-13T18:32:33,352 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60770 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60770 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:33,357 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:33,359 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:33,359 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK], DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:33,359 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741876_1059 2024-11-13T18:32:33,360 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:33,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60782 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741877_1060 to mirror 127.0.0.1:39111 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:33,369 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60782 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T18:32:33,370 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39111 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:33,370 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:33,371 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741877_1060 2024-11-13T18:32:33,371 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60782 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60782 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:32:33,373 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:33,374 WARN [IPC Server handler 4 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T18:32:33,374 WARN [IPC Server handler 4 on default port 34359 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T18:32:33,375 WARN [IPC Server handler 4 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T18:32:33,385 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:33,386 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:33,386 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:33,386 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:33,386 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:33,387 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522751312 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522753342 2024-11-13T18:32:33,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741858_1041 (size=13591) 2024-11-13T18:32:33,390 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 is not closed yet, will try archiving it next time 2024-11-13T18:32:33,402 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42681:42681)] 2024-11-13T18:32:33,402 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 is not closed yet, will try archiving it next time 2024-11-13T18:32:33,859 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:34,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40543 {}] regionserver.HRegion(8855): Flush requested on f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:34,285 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5dc2c5f6de8efdd32f63218bdb5469d 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T18:32:34,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/0bd19f6cb9d64ad9b57f2fdebc01590a is 1079, key is tmprow/info:/1731522754284/Put/seqid=0 2024-11-13T18:32:34,297 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:34,297 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:34,297 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741879_1062 2024-11-13T18:32:34,298 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:34,299 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:34,299 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK], DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:34,299 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741880_1063 2024-11-13T18:32:34,300 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:34,301 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:34,301 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 
2024-11-13T18:32:34,301 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741881_1064 2024-11-13T18:32:34,302 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:34,304 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39111 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:34,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60798 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741882_1065 to mirror 127.0.0.1:39111 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:34,304 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 
2024-11-13T18:32:34,304 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741882_1065 2024-11-13T18:32:34,304 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60798 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T18:32:34,304 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60798 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60798 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:32:34,305 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:34,315 WARN [IPC Server handler 2 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T18:32:34,315 WARN [IPC Server handler 2 on default port 34359 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T18:32:34,315 WARN [IPC Server handler 2 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T18:32:34,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741883_1066 (size=6027) 2024-11-13T18:32:34,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/0bd19f6cb9d64ad9b57f2fdebc01590a 2024-11-13T18:32:34,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/0bd19f6cb9d64ad9b57f2fdebc01590a as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/0bd19f6cb9d64ad9b57f2fdebc01590a 2024-11-13T18:32:34,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/0bd19f6cb9d64ad9b57f2fdebc01590a, entries=1, sequenceid=55, filesize=5.9 K 2024-11-13T18:32:34,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for f5dc2c5f6de8efdd32f63218bdb5469d in 64ms, sequenceid=55, compaction requested=true 2024-11-13T18:32:34,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:34,349 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because 
region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-13T18:32:34,349 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:34,350 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799 because midkey is the same as first or last row 2024-11-13T18:32:34,350 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:34,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5dc2c5f6de8efdd32f63218bdb5469d:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:32:34,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:32:34,350 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:32:34,351 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:32:34,352 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HStore(1541): f5dc2c5f6de8efdd32f63218bdb5469d/info is initiating minor compaction (all files) 2024-11-13T18:32:34,352 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5dc2c5f6de8efdd32f63218bdb5469d/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 
2024-11-13T18:32:34,352 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/8aaaeaedd30849319e9ce500edb2c442, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/0bd19f6cb9d64ad9b57f2fdebc01590a] into tmpdir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp, totalSize=29.3 K 2024-11-13T18:32:34,353 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.Compactor(225): Compacting dd45e33678594ae4b565bf1681e92799, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731522745301 2024-11-13T18:32:34,353 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8aaaeaedd30849319e9ce500edb2c442, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731522752854 2024-11-13T18:32:34,354 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0bd19f6cb9d64ad9b57f2fdebc01590a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731522754284 2024-11-13T18:32:34,367 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2eaccd22[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741843_1026 to 127.0.0.1:41943 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:32:34,367 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@206b275f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741863_1046 to 127.0.0.1:41943 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:34,376 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5dc2c5f6de8efdd32f63218bdb5469d#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:32:34,377 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/e39e462a0bcd4d0188d1681d5df1d68e is 1080, key is row0002/info:/1731522745301/Put/seqid=0 2024-11-13T18:32:34,389 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41943 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:34,389 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60824 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741884_1067 to mirror 127.0.0.1:41943 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:34,390 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 2024-11-13T18:32:34,390 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60824 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T18:32:34,390 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741884_1067 2024-11-13T18:32:34,391 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:34,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60824 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60824 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:34,393 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:34,393 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]) is bad. 2024-11-13T18:32:34,393 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741885_1068 2024-11-13T18:32:34,395 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39111,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK] 2024-11-13T18:32:34,402 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46239 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
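The repeated "Exception in createBlockOutputStream … Abandoning … Excluding datanode" cycle above and below is the DFS client rebuilding its write pipeline after the killed datanode. How aggressively the client insists on finding a replacement datanode before continuing an append is governed by the replace-datanode-on-failure settings; a minimal sketch of setting them is below (the values shown are one possible choice, not what this test uses, and the namenode address is a placeholder).

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Sketch: client-side settings that control whether a failed datanode in a
// write pipeline must be replaced before the write is allowed to continue.
public class PipelineFailurePolicySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep replacement enabled, but fall back to the surviving nodes when no
        // replacement can be found instead of failing the write outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        // Placeholder namenode URI, not the one from this log.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
            System.out.println("Client configured against " + fs.getUri());
        }
    }
}
```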
2024-11-13T18:32:34,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60836 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741886_1069 to mirror 127.0.0.1:46239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:34,402 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:34,403 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741886_1069 2024-11-13T18:32:34,403 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60836 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T18:32:34,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:60836 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60836 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:34,403 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:34,405 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:34,405 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 
2024-11-13T18:32:34,405 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741887_1070 2024-11-13T18:32:34,405 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:34,406 WARN [IPC Server handler 2 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T18:32:34,406 WARN [IPC Server handler 2 on default port 34359 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T18:32:34,406 WARN [IPC Server handler 2 on default port 34359 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T18:32:34,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741888_1071 (size=18097) 2024-11-13T18:32:34,427 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/e39e462a0bcd4d0188d1681d5df1d68e as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e 2024-11-13T18:32:34,437 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5dc2c5f6de8efdd32f63218bdb5469d/info of f5dc2c5f6de8efdd32f63218bdb5469d into e39e462a0bcd4d0188d1681d5df1d68e(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
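The minor compaction just completed was requested automatically by MemStoreFlusher.0, as logged earlier. For reference, the same kind of compaction can also be requested and observed through the public Admin API; a minimal sketch follows (the table name is taken from the log above, the connection details come from whatever configuration is on the classpath, and this is not part of the test itself).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: requesting a compaction explicitly and polling its current state.
public class RequestCompactionSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.compact(table);                               // queue a minor compaction
            CompactionState state = admin.getCompactionState(table);
            System.out.println("Compaction state: " + state);
        }
    }
}
```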
2024-11-13T18:32:34,437 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:34,437 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d., storeName=f5dc2c5f6de8efdd32f63218bdb5469d/info, priority=13, startTime=1731522754350; duration=0sec 2024-11-13T18:32:34,437 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-13T18:32:34,437 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:34,437 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e because midkey is the same as first or last row 2024-11-13T18:32:34,437 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-13T18:32:34,437 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:34,437 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e because midkey is the same as first or last row 2024-11-13T18:32:34,438 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-13T18:32:34,438 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:34,438 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e because midkey is the same as first or last row 2024-11-13T18:32:34,438 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:32:34,438 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5dc2c5f6de8efdd32f63218bdb5469d:info 2024-11-13T18:32:35,368 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2eaccd22[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer 
BP-1474286307-172.17.0.3-1731522729313:blk_1073741873_1056 to 127.0.0.1:39111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:35,368 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@206b275f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741868_1051 to 127.0.0.1:39111 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:35,403 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:35,403 WARN [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-13T18:32:35,518 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:35,531 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:35,566 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:35,566 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:35,566 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:32:35,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13c2f5a4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:35,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1191c470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:35,750 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d95479b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/java.io.tmpdir/jetty-localhost-38467-hadoop-hdfs-3_4_1-tests_jar-_-any-511565499657665782/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:35,752 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@469c1f5d{HTTP/1.1, (http/1.1)}{localhost:38467} 2024-11-13T18:32:35,752 INFO [Time-limited test {}] server.Server(415): Started @131803ms 2024-11-13T18:32:35,754 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:32:35,859 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:35,937 WARN [Thread-988 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:32:35,962 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3243cd23d6088549 with lease ID 0x910f12e34a83dce: from storage DS-086cb97f-6415-4953-a114-670ffd0d120a node DatanodeRegistration(127.0.0.1:39163, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=34827, infoSecurePort=0, ipcPort=35271, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:32:35,963 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3243cd23d6088549 with lease ID 0x910f12e34a83dce: from storage DS-babbce77-2d5c-44b8-b904-19e327a01341 node DatanodeRegistration(127.0.0.1:39163, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=34827, infoSecurePort=0, ipcPort=35271, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:36,350 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:37,368 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2eaccd22[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741883_1066 to 127.0.0.1:41511 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:32:37,368 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@206b275f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741858_1041 to 127.0.0.1:46239 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:37,403 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:37,860 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:38,350 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:38,367 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@206b275f[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44591, datanodeUuid=5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d, infoPort=42681, infoSecurePort=0, ipcPort=43295, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741888_1071 to 127.0.0.1:41511 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:39,404 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:39,860 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:40,114 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T18:32:40,351 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:40,875 ERROR [FSHLog-0-hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData-prefix:39e84130bbc9,32899,1731522730153 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:40,875 WARN [FSHLog-0-hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData-prefix:39e84130bbc9,32899,1731522730153 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
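The appendAndSync failure above is what drives the WAL roll request logged next. For reference, a WAL roll can also be forced for a specific region server through the Admin API; a minimal sketch follows (the host,port,startcode string is a placeholder, not a server from this run).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: asking a specific region server to roll its WAL writer.
public class ForceWalRollSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder server name; use the server shown by the master UI or Admin API.
        ServerName server = ServerName.valueOf("example-host,16020,1700000000000");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.rollWALWriter(server);   // asks the region server to roll its WAL
            System.out.println("Requested WAL roll on " + server);
        }
    }
}
```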
2024-11-13T18:32:40,875 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C32899%2C1731522730153:(num 1731522730774) roll requested 2024-11-13T18:32:40,875 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C32899%2C1731522730153.1731522760875 2024-11-13T18:32:40,879 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46239 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:40,879 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_810822159_22 at /127.0.0.1:60866 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741889_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741889_1072 to mirror 127.0.0.1:46239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:40,880 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 
2024-11-13T18:32:40,880 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741889_1072 2024-11-13T18:32:40,880 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_810822159_22 at /127.0.0.1:60866 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741889_1072] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T18:32:40,880 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_810822159_22 at /127.0.0.1:60866 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741889_1072] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60866 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:40,880 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:40,881 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:40,882 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK], DatanodeInfoWithStorage[127.0.0.1:39163,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]) is bad. 
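The roll in progress here eventually succeeds once a healthy pipeline is found, but the old WAL file cannot be closed cleanly because its pipeline is dead, so the close path falls back to HDFS lease recovery (the RecoverLeaseFSUtils retries logged a little further below). The same recovery can be driven directly with the DistributedFileSystem API; a minimal sketch follows, with the namenode address and WAL path as placeholders rather than the ones in this log.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: recovering the lease on a file whose writer died mid-stream,
// polling until the namenode reports the file closed.
public class RecoverLeaseSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI nn = URI.create("hdfs://localhost:8020");          // placeholder namenode
        Path wal = new Path("/hbase/MasterData/WALs/old-wal"); // placeholder path

        try (FileSystem fs = FileSystem.get(nn, conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            boolean recovered = dfs.recoverLease(wal);
            while (!recovered && !dfs.isFileClosed(wal)) {
                Thread.sleep(4000L);            // roughly the 4s retry gap seen in the log
                recovered = dfs.recoverLease(wal);
            }
            System.out.println("Lease recovered or file already closed: " + wal);
        }
    }
}
```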
2024-11-13T18:32:40,882 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741890_1073 2024-11-13T18:32:40,882 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK] 2024-11-13T18:32:40,883 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:40,883 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741891_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:39163,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:40,883 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741891_1074 2024-11-13T18:32:40,883 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:40,887 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:40,887 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:40,887 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:40,888 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:40,888 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:40,888 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522760875 2024-11-13T18:32:40,888 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:40,888 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:40,888 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 2024-11-13T18:32:40,889 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34827:34827),(127.0.0.1/127.0.0.1:42681:42681)] 2024-11-13T18:32:40,889 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 is not closed yet, will try archiving it next time 2024-11-13T18:32:40,889 WARN [IPC Server handler 0 on default port 34359 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741830_1006 2024-11-13T18:32:40,889 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 after 1ms 2024-11-13T18:32:41,404 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:41,860 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:43,404 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:43,861 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:44,891 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 after 4003ms 2024-11-13T18:32:45,405 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:45,861 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:45,955 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6ab09eb5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39163, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=34827, infoSecurePort=0, ipcPort=35271, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741836_1012 to 127.0.0.1:46239 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:45,955 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68e0b738[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39163, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=34827, infoSecurePort=0, ipcPort=35271, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741828_1004 to 127.0.0.1:41511 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:45,981 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5b21038d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1474286307-172.17.0.3-1731522729313:blk_1073741832_1008, datanode=DatanodeInfoWithStorage[127.0.0.1:41943,null,null]) java.net.ConnectException: Call From 39e84130bbc9/172.17.0.3 to localhost:38845 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T18:32:45,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741832_1020 (size=455) 2024-11-13T18:32:46,294 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522731193 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs/39e84130bbc9%2C40543%2C1731522730238.1731522731193 2024-11-13T18:32:46,296 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522751312 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs/39e84130bbc9%2C40543%2C1731522730238.1731522751312 2024-11-13T18:32:46,955 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68e0b738[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39163, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=34827, infoSecurePort=0, ipcPort=35271, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741826_1002 to 127.0.0.1:46239 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:46,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741835_1011 (size=393) 2024-11-13T18:32:47,405 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:47,861 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:48,955 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6ab09eb5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39163, datanodeUuid=39bde02c-5246-4f51-9588-c50d21cb7aca, infoPort=34827, infoSecurePort=0, ipcPort=35271, storageInfo=lv=-57;cid=testClusterID;nsid=405222347;c=1731522729313):Failed to transfer BP-1474286307-172.17.0.3-1731522729313:blk_1073741825_1001 to 127.0.0.1:46239 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:48,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741832_1020 (size=455) 2024-11-13T18:32:49,129 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40543%2C1731522730238.1731522769129 2024-11-13T18:32:49,132 WARN [Thread-1022 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:49,133 WARN [Thread-1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:49,133 WARN [Thread-1022 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741893_1077 2024-11-13T18:32:49,133 WARN [Thread-1022 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:49,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_810822159_22 at /127.0.0.1:41274 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data4]'}, localName='127.0.0.1:39163', datanodeUuid='39bde02c-5246-4f51-9588-c50d21cb7aca', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741894_1078 to mirror 127.0.0.1:46239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:49,136 WARN [Thread-1022 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46239 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:49,136 WARN [Thread-1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39163,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:49,136 WARN [Thread-1022 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741894_1078 2024-11-13T18:32:49,136 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_810822159_22 at /127.0.0.1:41274 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T18:32:49,136 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_810822159_22 at /127.0.0.1:41274 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:39163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41274 dst: /127.0.0.1:39163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:32:49,137 WARN [Thread-1022 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:49,148 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,148 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,148 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,148 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,149 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,149 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522753342 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522769129 2024-11-13T18:32:49,150 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34827:34827),(127.0.0.1/127.0.0.1:42681:42681)] 2024-11-13T18:32:49,150 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522753342 is not closed yet, will try archiving it next time 2024-11-13T18:32:49,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741878_1061 (size=12911) 2024-11-13T18:32:49,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40543 {}] regionserver.HRegion(8855): Flush requested on f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:49,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5dc2c5f6de8efdd32f63218bdb5469d 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T18:32:49,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/a6f22bd9a6a64d3e8bae26afb1a8ce02 is 1080, key is row0013/info:/1731522769152/Put/seqid=0 2024-11-13T18:32:49,166 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46239 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:49,166 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:42122 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741896_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741896_1080 to mirror 127.0.0.1:46239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:49,166 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:49,166 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:42122 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741896_1080] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T18:32:49,166 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741896_1080 2024-11-13T18:32:49,166 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:42122 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741896_1080] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42122 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:49,168 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:49,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741897_1081 (size=8190) 2024-11-13T18:32:49,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741897_1081 (size=8190) 2024-11-13T18:32:49,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/a6f22bd9a6a64d3e8bae26afb1a8ce02 2024-11-13T18:32:49,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/a6f22bd9a6a64d3e8bae26afb1a8ce02 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/a6f22bd9a6a64d3e8bae26afb1a8ce02 2024-11-13T18:32:49,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/a6f22bd9a6a64d3e8bae26afb1a8ce02, entries=3, sequenceid=66, filesize=8.0 K 2024-11-13T18:32:49,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for f5dc2c5f6de8efdd32f63218bdb5469d in 37ms, sequenceid=66, compaction requested=false 2024-11-13T18:32:49,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:49,192 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-13T18:32:49,192 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:49,192 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e because midkey is the same as first or last row 2024-11-13T18:32:49,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40543 {}] 
regionserver.HRegion(8855): Flush requested on f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:49,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing f5dc2c5f6de8efdd32f63218bdb5469d 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-13T18:32:49,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/55c5ca89f2dc4644b16641d9b949c4e8 is 1080, key is row0015/info:/1731522769158/Put/seqid=0 2024-11-13T18:32:49,391 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:49,391 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK], DatanodeInfoWithStorage[127.0.0.1:39163,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK]) is bad. 2024-11-13T18:32:49,391 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741898_1082 2024-11-13T18:32:49,392 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41511,DS-5649132b-0cd7-49d9-8c16-d56e2a5a423b,DISK] 2024-11-13T18:32:49,394 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:41296 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741899_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data4]'}, localName='127.0.0.1:39163', datanodeUuid='39bde02c-5246-4f51-9588-c50d21cb7aca', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741899_1083 to mirror 127.0.0.1:46239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:49,394 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1083 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46239 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:49,394 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:41296 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741899_1083] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T18:32:49,394 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741899_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39163,DS-086cb97f-6415-4953-a114-670ffd0d120a,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:49,394 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741899_1083 2024-11-13T18:32:49,394 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:41296 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741899_1083] {}] datanode.DataXceiver(331): 127.0.0.1:39163:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41296 dst: /127.0.0.1:39163 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:49,395 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:49,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741900_1084 (size=14660) 2024-11-13T18:32:49,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741900_1084 (size=14660) 2024-11-13T18:32:49,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/55c5ca89f2dc4644b16641d9b949c4e8 2024-11-13T18:32:49,405 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:49,406 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 
2024-11-13T18:32:49,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/55c5ca89f2dc4644b16641d9b949c4e8 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/55c5ca89f2dc4644b16641d9b949c4e8 2024-11-13T18:32:49,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/55c5ca89f2dc4644b16641d9b949c4e8, entries=9, sequenceid=79, filesize=14.3 K 2024-11-13T18:32:49,424 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for f5dc2c5f6de8efdd32f63218bdb5469d in 41ms, sequenceid=79, compaction requested=true 2024-11-13T18:32:49,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:49,424 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-13T18:32:49,424 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:49,424 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e because midkey is the same as first or last row 2024-11-13T18:32:49,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f5dc2c5f6de8efdd32f63218bdb5469d:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:32:49,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:32:49,425 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:32:49,426 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:32:49,426 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HStore(1541): f5dc2c5f6de8efdd32f63218bdb5469d/info is initiating minor compaction (all files) 2024-11-13T18:32:49,426 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of f5dc2c5f6de8efdd32f63218bdb5469d/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 
2024-11-13T18:32:49,427 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/a6f22bd9a6a64d3e8bae26afb1a8ce02, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/55c5ca89f2dc4644b16641d9b949c4e8] into tmpdir=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp, totalSize=40.0 K 2024-11-13T18:32:49,427 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.Compactor(225): Compacting e39e462a0bcd4d0188d1681d5df1d68e, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731522745301 2024-11-13T18:32:49,428 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.Compactor(225): Compacting a6f22bd9a6a64d3e8bae26afb1a8ce02, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731522755300 2024-11-13T18:32:49,428 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] compactions.Compactor(225): Compacting 55c5ca89f2dc4644b16641d9b949c4e8, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731522769158 2024-11-13T18:32:49,445 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f5dc2c5f6de8efdd32f63218bdb5469d#info#compaction#27 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:32:49,446 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/3c7ceb57e9454f5c8ddb53682c76f4a3 is 1080, key is row0002/info:/1731522745301/Put/seqid=0 2024-11-13T18:32:49,449 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46239 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:49,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:42192 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741901_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6]'}, localName='127.0.0.1:44591', datanodeUuid='5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d', xmitsInProgress=0}:Exception transferring block BP-1474286307-172.17.0.3-1731522729313:blk_1073741901_1085 to mirror 127.0.0.1:46239 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:49,449 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1474286307-172.17.0.3-1731522729313:blk_1073741901_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44591,DS-f6a03042-8629-4bd6-b4a2-7181de774bbb,DISK], DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK]) is bad. 2024-11-13T18:32:49,449 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-1474286307-172.17.0.3-1731522729313:blk_1073741901_1085 2024-11-13T18:32:49,450 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:42192 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741901_1085] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T18:32:49,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1111914008_22 at /127.0.0.1:42192 [Receiving block BP-1474286307-172.17.0.3-1731522729313:blk_1073741901_1085] {}] datanode.DataXceiver(331): 127.0.0.1:44591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42192 dst: /127.0.0.1:44591 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:49,450 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46239,DS-0c82ffb8-70b8-4c94-92c6-895ea4158ffa,DISK] 2024-11-13T18:32:49,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741902_1086 (size=28989) 2024-11-13T18:32:49,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741902_1086 (size=28989) 2024-11-13T18:32:49,464 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/.tmp/info/3c7ceb57e9454f5c8ddb53682c76f4a3 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3c7ceb57e9454f5c8ddb53682c76f4a3 2024-11-13T18:32:49,471 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in f5dc2c5f6de8efdd32f63218bdb5469d/info of f5dc2c5f6de8efdd32f63218bdb5469d into 3c7ceb57e9454f5c8ddb53682c76f4a3(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for f5dc2c5f6de8efdd32f63218bdb5469d: 2024-11-13T18:32:49,472 INFO [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d., storeName=f5dc2c5f6de8efdd32f63218bdb5469d/info, priority=13, startTime=1731522769425; duration=0sec 2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3c7ceb57e9454f5c8ddb53682c76f4a3 because midkey is the same as first or last row 2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3c7ceb57e9454f5c8ddb53682c76f4a3 because midkey is the same as first or last row 2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:32:49,472 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3c7ceb57e9454f5c8ddb53682c76f4a3 because midkey is the same as first or last row 2024-11-13T18:32:49,473 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:32:49,473 DEBUG [RS:0;39e84130bbc9:40543-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f5dc2c5f6de8efdd32f63218bdb5469d:info 2024-11-13T18:32:49,551 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.1731522753342 to 
hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs/39e84130bbc9%2C40543%2C1731522730238.1731522753342 2024-11-13T18:32:49,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T18:32:49,584 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T18:32:49,584 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:49,584 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:49,585 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:49,585 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T18:32:49,585 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T18:32:49,585 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1036791079, stopped=false 2024-11-13T18:32:49,585 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39e84130bbc9,32899,1731522730153 2024-11-13T18:32:49,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:49,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:49,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:49,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:49,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:49,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:49,587 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:32:49,587 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
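The tearDown call stack above (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster -> AsyncConnectionImpl.close) is the standard mini-cluster teardown path that produces the "Shutting down minicluster" and "Connection has been closed" lines. Below is a minimal sketch of such a test lifecycle; shutdownMiniCluster is taken straight from the stack trace, while the class name and the startMiniCluster(2) overload are assumptions chosen to match the two region servers (RS:0 and RS:1) seen in this log.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Assumed overload: start a small cluster with two region servers, as in this run.
        util.startMiniCluster(2);
      }

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection, then stops master and region servers,
        // which is the sequence logged above.
        util.shutdownMiniCluster();
      }
    }
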
2024-11-13T18:32:49,588 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:49,588 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:49,588 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '39e84130bbc9,40543,1731522730238' ***** 2024-11-13T18:32:49,588 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T18:32:49,588 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39e84130bbc9,38555,1731522731797' ***** 2024-11-13T18:32:49,588 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:49,588 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T18:32:49,588 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T18:32:49,588 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:49,588 INFO [RS:0;39e84130bbc9:40543 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T18:32:49,588 INFO [RS:0;39e84130bbc9:40543 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T18:32:49,588 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T18:32:49,588 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(3091): Received CLOSE for f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:49,589 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T18:32:49,589 INFO [RS:1;39e84130bbc9:38555 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T18:32:49,589 INFO [RS:1;39e84130bbc9:38555 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T18:32:49,589 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(959): stopping server 39e84130bbc9,38555,1731522731797 2024-11-13T18:32:49,589 INFO [RS:1;39e84130bbc9:38555 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:32:49,589 INFO [RS:1;39e84130bbc9:38555 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;39e84130bbc9:38555. 
2024-11-13T18:32:49,589 DEBUG [RS:1;39e84130bbc9:38555 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:49,589 DEBUG [RS:1;39e84130bbc9:38555 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:49,589 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(959): stopping server 39e84130bbc9,40543,1731522730238 2024-11-13T18:32:49,589 INFO [RS:0;39e84130bbc9:40543 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:32:49,589 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(976): stopping server 39e84130bbc9,38555,1731522731797; all regions closed. 2024-11-13T18:32:49,589 INFO [RS:0;39e84130bbc9:40543 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39e84130bbc9:40543. 
2024-11-13T18:32:49,589 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T18:32:49,589 DEBUG [RS:0;39e84130bbc9:40543 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:32:49,589 DEBUG [RS:0;39e84130bbc9:40543 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:49,589 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T18:32:49,589 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T18:32:49,589 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T18:32:49,589 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T18:32:49,589 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:49,590 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,590 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,590 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,590 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing f5dc2c5f6de8efdd32f63218bdb5469d, disabling compactions & flushes 2024-11-13T18:32:49,590 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,590 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:49,590 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 
2024-11-13T18:32:49,590 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,590 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. after waiting 0 ms 2024-11-13T18:32:49,590 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:49,593 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T18:32:49,593 DEBUG [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(1325): Online Regions={f5dc2c5f6de8efdd32f63218bdb5469d=TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T18:32:49,593 DEBUG [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, f5dc2c5f6de8efdd32f63218bdb5469d 2024-11-13T18:32:49,593 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:32:49,593 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:32:49,593 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:32:49,594 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:32:49,594 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:32:49,594 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-13T18:32:49,594 ERROR [FSHLog-0-hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e-prefix:39e84130bbc9,40543,1731522730238.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:32:49,594 WARN [FSHLog-0-hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e-prefix:39e84130bbc9,40543,1731522730238.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:49,594 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C40543%2C1731522730238.meta:.meta(num 1731522731650) roll requested 2024-11-13T18:32:49,595 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40543%2C1731522730238.meta.1731522769594.meta 2024-11-13T18:32:49,598 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:49,598 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
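The entries above show the pattern append fails -> roll requested -> new writer opened on a fresh pipeline -> old writer closed on a best-effort basis ("Failed to write trailer, non-fatal" / "close old writer failed"). The sketch below is not HBase's FSHLog; it only illustrates that general "roll the writer when an append fails" shape in plain Java, with invented names.

    import java.io.IOException;
    import java.io.Writer;
    import java.util.function.Supplier;

    /** Conceptual sketch only: roll to a new writer when an append fails, then retry once. */
    public class RollingWriterSketch {
      private final Supplier<Writer> writerFactory; // opens a new file / pipeline
      private Writer current;

      public RollingWriterSketch(Supplier<Writer> writerFactory) {
        this.writerFactory = writerFactory;
        this.current = writerFactory.get();
      }

      public synchronized void append(String entry) throws IOException {
        try {
          current.write(entry);
        } catch (IOException appendFailure) {
          // Append failed (e.g. the underlying pipeline is bad): open a fresh writer,
          // close the old one best-effort, then retry the entry once.
          Writer old = current;
          current = writerFactory.get();
          try {
            old.close();
          } catch (IOException closeFailure) {
            // Non-fatal, mirrors the "close old writer failed" WARN above.
          }
          current.write(entry);
        }
      }
    }
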
2024-11-13T18:32:49,598 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 2024-11-13T18:32:49,598 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/16ad7cc8fd6740eab12e2396f03aa5c7, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/885ea5dafd2c41f2bde904f1506fbcd4, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3a974e1b51244c46ba5683191a8af456, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/8aaaeaedd30849319e9ce500edb2c442, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/0bd19f6cb9d64ad9b57f2fdebc01590a, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/a6f22bd9a6a64d3e8bae26afb1a8ce02, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/55c5ca89f2dc4644b16641d9b949c4e8] to archive 2024-11-13T18:32:49,599 WARN [IPC Server handler 4 on default port 34359 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 has not been closed. Lease recovery is in progress. RecoveryId = 1088 for block blk_1073741837_1013 2024-11-13T18:32:49,600 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 after 1ms 2024-11-13T18:32:49,600 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
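The "Recover lease ... attempt=0 ... after 1ms" line above (and the later "attempt=1 ... after 4002ms") comes from a retry loop around HDFS lease recovery, which is asynchronous on the NameNode side (note the "Lease recovery is in progress. RecoveryId = 1088" WARN). A hedged sketch of such a loop follows, using the public DistributedFileSystem.recoverLease call; the helper name, attempt count, and pause are assumptions, not RecoverLeaseFSUtils itself.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    /** Sketch of lease recovery with retries; not the actual RecoverLeaseFSUtils implementation. */
    public final class LeaseRecoverySketch {
      public static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path walFile,
          int maxAttempts, long pauseMillis) throws Exception {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          // recoverLease returns true once the NameNode considers the file closed.
          if (dfs.recoverLease(walFile)) {
            return true;
          }
          // "Failed to recover lease, attempt=N": block recovery is asynchronous,
          // so wait before asking the NameNode again.
          Thread.sleep(pauseMillis);
        }
        return false;
      }
    }
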
2024-11-13T18:32:49,603 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/16ad7cc8fd6740eab12e2396f03aa5c7 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/16ad7cc8fd6740eab12e2396f03aa5c7 2024-11-13T18:32:49,603 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,604 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,604 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,604 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,604 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,604 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522769594.meta 2024-11-13T18:32:49,604 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/885ea5dafd2c41f2bde904f1506fbcd4 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/885ea5dafd2c41f2bde904f1506fbcd4 2024-11-13T18:32:49,605 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:49,605 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41943,DS-041f18ee-f73b-44ec-940b-f43c5f131700,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:32:49,605 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta 2024-11-13T18:32:49,605 WARN [IPC Server handler 2 on default port 34359 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta has not been closed. Lease recovery is in progress. RecoveryId = 1089 for block blk_1073741834_1010 2024-11-13T18:32:49,606 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta after 1ms 2024-11-13T18:32:49,606 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/dd45e33678594ae4b565bf1681e92799 2024-11-13T18:32:49,608 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3a974e1b51244c46ba5683191a8af456 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/3a974e1b51244c46ba5683191a8af456 2024-11-13T18:32:49,609 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/8aaaeaedd30849319e9ce500edb2c442 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/8aaaeaedd30849319e9ce500edb2c442 2024-11-13T18:32:49,610 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34827:34827),(127.0.0.1/127.0.0.1:42681:42681)] 2024-11-13T18:32:49,610 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] 
wal.AbstractFSWAL(879): hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta is not closed yet, will try archiving it next time 2024-11-13T18:32:49,611 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/e39e462a0bcd4d0188d1681d5df1d68e 2024-11-13T18:32:49,612 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/0bd19f6cb9d64ad9b57f2fdebc01590a to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/0bd19f6cb9d64ad9b57f2fdebc01590a 2024-11-13T18:32:49,613 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/a6f22bd9a6a64d3e8bae26afb1a8ce02 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/a6f22bd9a6a64d3e8bae26afb1a8ce02 2024-11-13T18:32:49,615 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/55c5ca89f2dc4644b16641d9b949c4e8 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/info/55c5ca89f2dc4644b16641d9b949c4e8 2024-11-13T18:32:49,615 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=39e84130bbc9:32899 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T18:32:49,616 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [16ad7cc8fd6740eab12e2396f03aa5c7=10347, 885ea5dafd2c41f2bde904f1506fbcd4=12506, dd45e33678594ae4b565bf1681e92799=17994, 3a974e1b51244c46ba5683191a8af456=6027, 8aaaeaedd30849319e9ce500edb2c442=6027, e39e462a0bcd4d0188d1681d5df1d68e=18097, 0bd19f6cb9d64ad9b57f2fdebc01590a=6027, a6f22bd9a6a64d3e8bae26afb1a8ce02=8190, 55c5ca89f2dc4644b16641d9b949c4e8=14660] 2024-11-13T18:32:49,622 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/default/TestLogRolling-testLogRollOnDatanodeDeath/f5dc2c5f6de8efdd32f63218bdb5469d/recovered.edits/83.seqid, newMaxSeqId=83, maxSeqId=1 2024-11-13T18:32:49,622 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 2024-11-13T18:32:49,623 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for f5dc2c5f6de8efdd32f63218bdb5469d: Waiting for close lock at 1731522769590Running coprocessor pre-close hooks at 1731522769590Disabling compacts and flushes for region at 1731522769590Disabling writes for close at 1731522769590Writing region close event to WAL at 1731522769617 (+27 ms)Running coprocessor post-close hooks at 1731522769622 (+5 ms)Closed at 1731522769622 2024-11-13T18:32:49,623 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d. 
2024-11-13T18:32:49,627 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/.tmp/info/ee7d7deaafe64dc0b22475103fc740bc is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731522731894.f5dc2c5f6de8efdd32f63218bdb5469d./info:regioninfo/1731522732284/Put/seqid=0 2024-11-13T18:32:49,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741904_1090 (size=7089) 2024-11-13T18:32:49,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741904_1090 (size=7089) 2024-11-13T18:32:49,635 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/.tmp/info/ee7d7deaafe64dc0b22475103fc740bc 2024-11-13T18:32:49,666 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/.tmp/ns/8aa30eddc0f5434f9ab419eee3c25b51 is 43, key is default/ns:d/1731522731710/Put/seqid=0 2024-11-13T18:32:49,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741905_1091 (size=5153) 2024-11-13T18:32:49,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741905_1091 (size=5153) 2024-11-13T18:32:49,672 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/.tmp/ns/8aa30eddc0f5434f9ab419eee3c25b51 2024-11-13T18:32:49,696 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/.tmp/table/abe8d2831834403e9f35190bbc3ef288 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731522732299/Put/seqid=0 2024-11-13T18:32:49,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741906_1092 (size=5424) 2024-11-13T18:32:49,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741906_1092 (size=5424) 2024-11-13T18:32:49,702 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/.tmp/table/abe8d2831834403e9f35190bbc3ef288 2024-11-13T18:32:49,709 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/.tmp/info/ee7d7deaafe64dc0b22475103fc740bc as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/info/ee7d7deaafe64dc0b22475103fc740bc 2024-11-13T18:32:49,716 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/info/ee7d7deaafe64dc0b22475103fc740bc, entries=10, sequenceid=11, filesize=6.9 K 2024-11-13T18:32:49,717 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/.tmp/ns/8aa30eddc0f5434f9ab419eee3c25b51 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/ns/8aa30eddc0f5434f9ab419eee3c25b51 2024-11-13T18:32:49,724 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/ns/8aa30eddc0f5434f9ab419eee3c25b51, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T18:32:49,725 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/.tmp/table/abe8d2831834403e9f35190bbc3ef288 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/table/abe8d2831834403e9f35190bbc3ef288 2024-11-13T18:32:49,731 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/table/abe8d2831834403e9f35190bbc3ef288, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T18:32:49,732 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false 2024-11-13T18:32:49,739 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T18:32:49,740 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:32:49,740 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:32:49,740 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522769593Running coprocessor pre-close hooks at 1731522769593Disabling compacts and flushes for region at 1731522769593Disabling writes for 
close at 1731522769594 (+1 ms)Obtaining lock to block concurrent updates at 1731522769594Preparing flush snapshotting stores in 1588230740 at 1731522769594Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731522769594Flushing stores of hbase:meta,,1.1588230740 at 1731522769610 (+16 ms)Flushing 1588230740/info: creating writer at 1731522769610Flushing 1588230740/info: appending metadata at 1731522769627 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731522769627Flushing 1588230740/ns: creating writer at 1731522769642 (+15 ms)Flushing 1588230740/ns: appending metadata at 1731522769665 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1731522769665Flushing 1588230740/table: creating writer at 1731522769679 (+14 ms)Flushing 1588230740/table: appending metadata at 1731522769695 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731522769695Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30750781: reopening flushed file at 1731522769708 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@337a20a4: reopening flushed file at 1731522769716 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22b7a2bb: reopening flushed file at 1731522769724 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1731522769732 (+8 ms)Writing region close event to WAL at 1731522769735 (+3 ms)Running coprocessor post-close hooks at 1731522769740 (+5 ms)Closed at 1731522769740 2024-11-13T18:32:49,740 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T18:32:49,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T18:32:49,793 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(976): stopping server 39e84130bbc9,40543,1731522730238; all regions closed. 
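The meta flush above writes each column family to a file under .tmp ("Flushed memstore data ... to=.../.tmp/info/...") and then commits it into the store directory ("Committing .../.tmp/info/<file> as .../info/<file>", "Added ..."). The sketch below shows only that write-to-temp-then-rename pattern, using local files for illustration; it is a simplification, not HRegionFileSystem.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    /** Sketch of the flush commit pattern: write the flushed file under .tmp, then move it into the store. */
    public final class TmpThenCommitSketch {
      public static Path commitFlushedFile(Path storeDir, String fileName, byte[] flushedBytes)
          throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(fileName);
        // Corresponds to "Flushed memstore data ... to=.../.tmp/<family>/<file>".
        Files.write(tmpFile, flushedBytes);
        Path committed = storeDir.resolve(fileName);
        // Corresponds to "Committing .../.tmp/<family>/<file> as .../<family>/<file>":
        // a rename makes the flushed file visible to readers in one step.
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
      }
    }
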
2024-11-13T18:32:49,794 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,794 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:32:49,794 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T18:32:49,795 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,795 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:49,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741903_1087 (size=825) 2024-11-13T18:32:49,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741903_1087 (size=825) 2024-11-13T18:32:49,860 INFO [regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T18:32:49,860 INFO [regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T18:32:49,862 INFO [regionserver/39e84130bbc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:32:50,022 INFO [regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T18:32:50,022 INFO [regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T18:32:51,028 INFO [regionserver/39e84130bbc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:32:51,741 INFO [master/39e84130bbc9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T18:32:51,741 INFO [master/39e84130bbc9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-13T18:32:51,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741833_1009 (size=32) 2024-11-13T18:32:51,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:32:52,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:32:52,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:32:53,601 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 after 4002ms 2024-11-13T18:32:53,607 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta after 4002ms 2024-11-13T18:32:54,598 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-13T18:32:54,600 DEBUG [RS:1;39e84130bbc9:38555 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs 2024-11-13T18:32:54,600 INFO [RS:1;39e84130bbc9:38555 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C38555%2C1731522731797:(num 1731522731994) 2024-11-13T18:32:54,600 DEBUG [RS:1;39e84130bbc9:38555 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:54,600 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:32:54,600 INFO [RS:1;39e84130bbc9:38555 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:32:54,601 INFO [RS:1;39e84130bbc9:38555 {}] hbase.ChoreService(370): Chore service for: regionserver/39e84130bbc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T18:32:54,601 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T18:32:54,601 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T18:32:54,601 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T18:32:54,601 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
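The ERROR above ("We have waited 5 seconds but the close of async writer doesn't complete") names the knob for that wait. A minimal sketch of raising it in a test configuration follows; the key string is quoted verbatim from the log, while the wrapper class and method are made up for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    /** Sketch: lengthen the WAL writer close wait referenced by the ERROR line above. */
    public final class WalShutdownWaitSketch {
      public static Configuration withLongerWalShutdownWait(int seconds) {
        Configuration conf = HBaseConfiguration.create();
        // Key taken from the error message; this run used the 5 second default.
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", seconds);
        return conf;
      }
    }
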
2024-11-13T18:32:54,601 INFO [RS:1;39e84130bbc9:38555 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:32:54,601 INFO [RS:1;39e84130bbc9:38555 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38555 2024-11-13T18:32:54,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39e84130bbc9,38555,1731522731797 2024-11-13T18:32:54,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:32:54,603 INFO [RS:1;39e84130bbc9:38555 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:32:54,605 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39e84130bbc9,38555,1731522731797] 2024-11-13T18:32:54,607 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39e84130bbc9,38555,1731522731797 already deleted, retry=false 2024-11-13T18:32:54,607 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39e84130bbc9,38555,1731522731797 expired; onlineServers=1 2024-11-13T18:32:54,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:32:54,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:54,638 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:54,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:54,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:54,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:54,639 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:54,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:54,647 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:54,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:54,705 INFO [RS:1;39e84130bbc9:38555 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:32:54,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38555-0x100ed5f0d300002, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:54,705 INFO [RS:1;39e84130bbc9:38555 {}] regionserver.HRegionServer(1031): Exiting; stopping=39e84130bbc9,38555,1731522731797; zookeeper connection closed. 2024-11-13T18:32:54,706 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7d37e119 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7d37e119 2024-11-13T18:32:54,795 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-13T18:32:54,800 DEBUG [RS:0;39e84130bbc9:40543 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs 2024-11-13T18:32:54,800 INFO [RS:0;39e84130bbc9:40543 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C40543%2C1731522730238.meta:.meta(num 1731522769594) 2024-11-13T18:32:54,800 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,800 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,801 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,801 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,801 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741895_1079 (size=16308) 2024-11-13T18:32:54,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741895_1079 (size=16308) 2024-11-13T18:32:54,807 DEBUG [RS:0;39e84130bbc9:40543 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs 2024-11-13T18:32:54,807 INFO [RS:0;39e84130bbc9:40543 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C40543%2C1731522730238:(num 1731522769129) 2024-11-13T18:32:54,807 DEBUG [RS:0;39e84130bbc9:40543 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:54,807 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:32:54,807 INFO [RS:0;39e84130bbc9:40543 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:32:54,807 INFO [RS:0;39e84130bbc9:40543 {}] hbase.ChoreService(370): Chore service for: regionserver/39e84130bbc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-13T18:32:54,807 INFO [RS:0;39e84130bbc9:40543 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:32:54,807 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T18:32:54,808 INFO [RS:0;39e84130bbc9:40543 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40543 2024-11-13T18:32:54,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39e84130bbc9,40543,1731522730238 2024-11-13T18:32:54,811 INFO [RS:0;39e84130bbc9:40543 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:32:54,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:32:54,811 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39e84130bbc9,40543,1731522730238] 2024-11-13T18:32:54,813 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39e84130bbc9,40543,1731522730238 already deleted, retry=false 2024-11-13T18:32:54,813 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39e84130bbc9,40543,1731522730238 expired; onlineServers=0 2024-11-13T18:32:54,813 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39e84130bbc9,32899,1731522730153' ***** 2024-11-13T18:32:54,814 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T18:32:54,814 INFO [M:0;39e84130bbc9:32899 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:32:54,814 INFO [M:0;39e84130bbc9:32899 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:32:54,814 DEBUG [M:0;39e84130bbc9:32899 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T18:32:54,814 DEBUG [M:0;39e84130bbc9:32899 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T18:32:54,814 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522730875 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522730875,5,FailOnTimeoutGroup] 2024-11-13T18:32:54,814 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
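The NodeDeleted events on /hbase/rs/... and the RegionServerTracker expiration handling above are the ephemeral-znode pattern: each region server registers an ephemeral node that ZooKeeper removes when the server's session ends, and a watcher on the parent path observes the change. A minimal sketch of that pattern with the plain ZooKeeper client follows; the connect string, paths and timeout are assumptions for illustration, and it expects a ZooKeeper server listening on 127.0.0.1:2181.

import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralNodeSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative connect string; the run above used a mini ZK quorum on 127.0.0.1:59783.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        // Persistent parent paths, created once.
        if (zk.exists("/demo", false) == null) {
            zk.create("/demo", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
        if (zk.exists("/demo/rs", false) == null) {
            zk.create("/demo/rs", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
        // The ephemeral child disappears automatically when this session closes,
        // which is what produces NodeDeleted / NodeChildrenChanged for watchers.
        zk.create("/demo/rs/server-1", new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        List<String> live = zk.getChildren("/demo/rs",
                event -> System.out.println("membership changed: " + event.getType()));
        System.out.println("live servers: " + live);
        zk.close();
    }
}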
2024-11-13T18:32:54,814 INFO [M:0;39e84130bbc9:32899 {}] hbase.ChoreService(370): Chore service for: master/39e84130bbc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T18:32:54,814 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522730875 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522730875,5,FailOnTimeoutGroup] 2024-11-13T18:32:54,814 INFO [M:0;39e84130bbc9:32899 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:32:54,814 DEBUG [M:0;39e84130bbc9:32899 {}] master.HMaster(1795): Stopping service threads 2024-11-13T18:32:54,814 INFO [M:0;39e84130bbc9:32899 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T18:32:54,814 INFO [M:0;39e84130bbc9:32899 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:32:54,815 INFO [M:0;39e84130bbc9:32899 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T18:32:54,815 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T18:32:54,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T18:32:54,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:54,816 DEBUG [M:0;39e84130bbc9:32899 {}] zookeeper.ZKUtil(347): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T18:32:54,816 WARN [M:0;39e84130bbc9:32899 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T18:32:54,817 INFO [M:0;39e84130bbc9:32899 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/.lastflushedseqids 2024-11-13T18:32:54,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741907_1093 (size=130) 2024-11-13T18:32:54,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741907_1093 (size=130) 2024-11-13T18:32:54,824 INFO [M:0;39e84130bbc9:32899 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T18:32:54,824 INFO [M:0;39e84130bbc9:32899 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T18:32:54,824 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:32:54,824 INFO [M:0;39e84130bbc9:32899 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:32:54,824 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:54,825 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:32:54,825 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:54,825 INFO [M:0;39e84130bbc9:32899 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-11-13T18:32:54,843 DEBUG [M:0;39e84130bbc9:32899 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98b1ea73a3a74264b946632239f0b8df is 82, key is hbase:meta,,1/info:regioninfo/1731522731689/Put/seqid=0 2024-11-13T18:32:54,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741908_1094 (size=5672) 2024-11-13T18:32:54,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741908_1094 (size=5672) 2024-11-13T18:32:54,849 INFO [M:0;39e84130bbc9:32899 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98b1ea73a3a74264b946632239f0b8df 2024-11-13T18:32:54,874 DEBUG [M:0;39e84130bbc9:32899 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8437a9b8b2a048a78de501609fb1aa58 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731522732305/Put/seqid=0 2024-11-13T18:32:54,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741909_1095 (size=6254) 2024-11-13T18:32:54,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741909_1095 (size=6254) 2024-11-13T18:32:54,890 INFO [M:0;39e84130bbc9:32899 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8437a9b8b2a048a78de501609fb1aa58 2024-11-13T18:32:54,897 INFO [M:0;39e84130bbc9:32899 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8437a9b8b2a048a78de501609fb1aa58 2024-11-13T18:32:54,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:54,913 INFO [RS:0;39e84130bbc9:40543 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:32:54,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40543-0x100ed5f0d300001, quorum=127.0.0.1:59783, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:54,913 INFO [RS:0;39e84130bbc9:40543 {}] regionserver.HRegionServer(1031): Exiting; stopping=39e84130bbc9,40543,1731522730238; zookeeper connection closed. 2024-11-13T18:32:54,913 DEBUG [M:0;39e84130bbc9:32899 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6657866ea3a14e6dbd382c96691f8903 is 69, key is 39e84130bbc9,38555,1731522731797/rs:state/1731522731840/Put/seqid=0 2024-11-13T18:32:54,913 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@450772c6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@450772c6 2024-11-13T18:32:54,913 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-13T18:32:54,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741910_1096 (size=5224) 2024-11-13T18:32:54,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741910_1096 (size=5224) 2024-11-13T18:32:54,919 INFO [M:0;39e84130bbc9:32899 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6657866ea3a14e6dbd382c96691f8903 2024-11-13T18:32:54,941 DEBUG [M:0;39e84130bbc9:32899 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c2345aea3ab54e0f9a042fbfb388dc5d is 52, key is load_balancer_on/state:d/1731522731778/Put/seqid=0 2024-11-13T18:32:54,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741911_1097 (size=5056) 2024-11-13T18:32:54,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741911_1097 (size=5056) 2024-11-13T18:32:54,947 INFO [M:0;39e84130bbc9:32899 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c2345aea3ab54e0f9a042fbfb388dc5d 2024-11-13T18:32:54,953 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98b1ea73a3a74264b946632239f0b8df as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98b1ea73a3a74264b946632239f0b8df 2024-11-13T18:32:54,959 INFO [M:0;39e84130bbc9:32899 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98b1ea73a3a74264b946632239f0b8df, entries=8, 
sequenceid=60, filesize=5.5 K 2024-11-13T18:32:54,961 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8437a9b8b2a048a78de501609fb1aa58 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8437a9b8b2a048a78de501609fb1aa58 2024-11-13T18:32:54,966 INFO [M:0;39e84130bbc9:32899 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8437a9b8b2a048a78de501609fb1aa58 2024-11-13T18:32:54,967 INFO [M:0;39e84130bbc9:32899 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8437a9b8b2a048a78de501609fb1aa58, entries=6, sequenceid=60, filesize=6.1 K 2024-11-13T18:32:54,968 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6657866ea3a14e6dbd382c96691f8903 as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6657866ea3a14e6dbd382c96691f8903 2024-11-13T18:32:54,974 INFO [M:0;39e84130bbc9:32899 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6657866ea3a14e6dbd382c96691f8903, entries=2, sequenceid=60, filesize=5.1 K 2024-11-13T18:32:54,974 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c2345aea3ab54e0f9a042fbfb388dc5d as hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c2345aea3ab54e0f9a042fbfb388dc5d 2024-11-13T18:32:54,980 INFO [M:0;39e84130bbc9:32899 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c2345aea3ab54e0f9a042fbfb388dc5d, entries=1, sequenceid=60, filesize=4.9 K 2024-11-13T18:32:54,981 INFO [M:0;39e84130bbc9:32899 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=60, compaction requested=false 2024-11-13T18:32:54,982 INFO [M:0;39e84130bbc9:32899 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
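The DEBUG/INFO pairs above ("Committing ... /.tmp/... as ...", then "Added ..., entries=N, sequenceid=60") show each flushed file being written under the store's .tmp directory and then moved into its column family directory. The same write-then-rename idiom, stripped down to bare Hadoop FileSystem calls, looks roughly like this; the local filesystem and the /tmp/store paths are assumptions for the sketch (the run above commits on hdfs://localhost:34359).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Local filesystem keeps the sketch self-contained.
        FileSystem fs = FileSystem.getLocal(conf);
        Path tmp = new Path("/tmp/store/.tmp/flush-0001");
        Path committed = new Path("/tmp/store/info/flush-0001");
        // 1. Write the new file somewhere readers never look.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.writeBytes("flushed cells would go here");
        }
        // 2. The "commit" is a rename from .tmp into the live store directory.
        fs.mkdirs(committed.getParent());
        boolean moved = fs.rename(tmp, committed);
        System.out.println("committed=" + moved + " -> " + committed);
    }
}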
2024-11-13T18:32:54,982 DEBUG [M:0;39e84130bbc9:32899 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522774824Disabling compacts and flushes for region at 1731522774824Disabling writes for close at 1731522774825 (+1 ms)Obtaining lock to block concurrent updates at 1731522774825Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731522774825Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1731522774825Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731522774826 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731522774826Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731522774842 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731522774842Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731522774856 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731522774873 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731522774873Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731522774897 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731522774912 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731522774912Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731522774924 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731522774940 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731522774940Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6cf1fbd7: reopening flushed file at 1731522774952 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12d793b4: reopening flushed file at 1731522774959 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54681546: reopening flushed file at 1731522774967 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58fc4c30: reopening flushed file at 1731522774974 (+7 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 156ms, sequenceid=60, compaction requested=false at 1731522774981 (+7 ms)Writing region close event to WAL at 1731522774982 (+1 ms)Closed at 1731522774982 2024-11-13T18:32:54,983 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,983 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,983 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,983 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,983 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:32:54,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741892_1075 (size=1045) 2024-11-13T18:32:54,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741892_1075 (size=1045) 2024-11-13T18:32:55,150 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T18:32:55,165 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:55,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:55,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:55,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:55,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:55,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:55,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:55,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:32:55,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39163 is added to blk_1073741878_1061 (size=12911) 2024-11-13T18:32:55,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:32:55,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:32:55,985 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@109b74a9 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1474286307-172.17.0.3-1731522729313:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:41943,null,null]) java.net.ConnectException: Call From 39e84130bbc9/172.17.0.3 to localhost:38845 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T18:32:56,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:32:56,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:32:56,899 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/WALs/39e84130bbc9,32899,1731522730153/39e84130bbc9%2C32899%2C1731522730153.1731522730774 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/oldWALs/39e84130bbc9%2C32899%2C1731522730153.1731522730774 2024-11-13T18:32:56,904 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/MasterData/oldWALs/39e84130bbc9%2C32899%2C1731522730153.1731522730774 to hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/oldWALs/39e84130bbc9%2C32899%2C1731522730153.1731522730774$masterlocalwal$ 2024-11-13T18:32:56,904 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T18:32:56,904 INFO [M:0;39e84130bbc9:32899 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T18:32:56,904 INFO [M:0;39e84130bbc9:32899 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:32899 2024-11-13T18:32:56,904 INFO [M:0;39e84130bbc9:32899 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:32:57,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:57,006 INFO [M:0;39e84130bbc9:32899 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:32:57,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32899-0x100ed5f0d300000, quorum=127.0.0.1:59783, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:32:57,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d95479b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:57,010 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@469c1f5d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:57,010 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:57,010 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1191c470{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:57,010 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13c2f5a4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,STOPPED} 
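Each "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" block above comes from RecoverLeaseFSUtils calling isFileClosed reflectively: reflection wraps whatever the target method throws, and the wrapper itself carries no message, so the real failure only appears under getCause(). A self-contained illustration of that wrapping follows; the stand-in method and its exception text are made up for the example.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrapSketch {
    // Stand-in for the reflectively invoked isFileClosed call.
    public boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        ReflectionWrapSketch target = new ReflectionWrapSketch();
        Method m = ReflectionWrapSketch.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(target, "/some/wal/file");
        } catch (InvocationTargetException ite) {
            // The wrapper has a null message, which the logger renders as
            // "InvocationTargetException: null"; the underlying IOException is the cause.
            System.out.println("wrapped: " + ite);
            System.out.println("cause:   " + ite.getCause());
        }
    }
}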
2024-11-13T18:32:57,012 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:57,012 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T18:32:57,012 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1474286307-172.17.0.3-1731522729313 (Datanode Uuid 39bde02c-5246-4f51-9588-c50d21cb7aca) service to localhost/127.0.0.1:34359 2024-11-13T18:32:57,012 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:57,013 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3b0efcc2 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1474286307-172.17.0.3-1731522729313:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41943,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:38845 , LocalHost:localPort 39e84130bbc9/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T18:32:57,013 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3b0efcc2 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1474286307-172.17.0.3-1731522729313:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:39163,null,null], DatanodeInfoWithStorage[127.0.0.1:41943,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1474286307-172.17.0.3-1731522729313 2024-11-13T18:32:57,013 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data3/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:57,013 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3b0efcc2 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39163,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1474286307-172.17.0.3-1731522729313 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:57,014 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3b0efcc2 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41943,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1474286307-172.17.0.3-1731522729313 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:32:57,014 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data4/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:57,014 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3b0efcc2 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:39163,null,null], DatanodeInfoWithStorage[127.0.0.1:41943,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1474286307-172.17.0.3-1731522729313:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:39163,null,null], DatanodeInfoWithStorage[127.0.0.1:41943,null,null]] 2024-11-13T18:32:57,014 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:57,016 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22d0350b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:57,017 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3cc6081e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:57,017 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:57,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75255721{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:57,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@219c70cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:57,019 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:32:57,019 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T18:32:57,019 WARN [BP-1474286307-172.17.0.3-1731522729313 heartbeating to localhost/127.0.0.1:34359 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1474286307-172.17.0.3-1731522729313 (Datanode Uuid 5edcb6f9-0dac-4b71-89b6-b21d59ee6f2d) service to localhost/127.0.0.1:34359 2024-11-13T18:32:57,021 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:32:57,022 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data5/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:57,022 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/cluster_9772187e-0032-a02e-5553-61b305183d06/data/data6/current/BP-1474286307-172.17.0.3-1731522729313 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:32:57,023 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:32:57,030 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5519c514{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:32:57,031 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47f2ada2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:32:57,031 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:32:57,031 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35c95cb4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:32:57,031 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17c48ca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir/,STOPPED} 2024-11-13T18:32:57,046 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T18:32:57,082 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T18:32:57,092 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 81) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34359 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007febe8bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:32945 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34359 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34359 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34359 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native 
Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34359 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:34359 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34359 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:34359 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34359 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34359 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:34359 from jenkins.hfs.3 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:32945 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007febe8bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=360 (was 376), ProcessCount=11 (was 11), AvailableMemoryMB=1699 (was 3197) 2024-11-13T18:32:57,099 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=360, ProcessCount=11, AvailableMemoryMB=1699 2024-11-13T18:32:57,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T18:32:57,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.log.dir so I do NOT create it in target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351 2024-11-13T18:32:57,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ad8670c4-e599-72f2-a098-d9558c868335/hadoop.tmp.dir so I do NOT create it in target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351 2024-11-13T18:32:57,099 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0, deleteOnExit=true 2024-11-13T18:32:57,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/test.cache.data in system properties and HBase conf 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir in system properties and HBase conf 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T18:32:57,100 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:32:57,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T18:32:57,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T18:32:57,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:32:57,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:32:57,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T18:32:57,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/nfs.dump.dir in system properties and HBase conf 2024-11-13T18:32:57,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/java.io.tmpdir in system properties and HBase conf 2024-11-13T18:32:57,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:32:57,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T18:32:57,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T18:32:57,116 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:32:57,196 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:57,202 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:57,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:57,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:57,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:32:57,210 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:57,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13fdd007{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:57,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10a92d53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:57,396 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@ba4770c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/java.io.tmpdir/jetty-localhost-37621-hadoop-hdfs-3_4_1-tests_jar-_-any-15547714822114610976/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:32:57,397 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4702e786{HTTP/1.1, (http/1.1)}{localhost:37621} 2024-11-13T18:32:57,397 INFO [Time-limited test {}] server.Server(415): Started @153447ms 2024-11-13T18:32:57,417 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:32:57,499 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T18:32:57,504 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T18:32:57,507 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T18:32:57,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T18:32:57,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-13T18:32:57,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74ea1d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,AVAILABLE}
2024-11-13T18:32:57,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33255ae1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T18:32:57,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T18:32:57,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T18:32:57,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45628471{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/java.io.tmpdir/jetty-localhost-45867-hadoop-hdfs-3_4_1-tests_jar-_-any-18416711270408849312/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T18:32:57,665 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e6129e0{HTTP/1.1, (http/1.1)}{localhost:45867}
2024-11-13T18:32:57,666 INFO [Time-limited test {}] server.Server(415): Started @153717ms
2024-11-13T18:32:57,668 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-13T18:32:57,718 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:32:57,724 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:32:57,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:32:57,726 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:32:57,726 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:32:57,727 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4360f0f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:32:57,727 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@586e8021{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:32:57,782 WARN [Thread-1195 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data1/current/BP-540785503-172.17.0.3-1731522777124/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:57,783 WARN [Thread-1196 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data2/current/BP-540785503-172.17.0.3-1731522777124/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:57,831 WARN [Thread-1174 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:32:57,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8a0e08be1c81b92 with lease ID 0x9fd94f6d02e21e5f: Processing first storage report for DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6 from datanode DatanodeRegistration(127.0.0.1:33483, datanodeUuid=17ec2018-212d-437d-bacb-09db82eca4a1, infoPort=46353, infoSecurePort=0, ipcPort=41503, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124) 2024-11-13T18:32:57,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8a0e08be1c81b92 with lease ID 0x9fd94f6d02e21e5f: from storage DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6 node DatanodeRegistration(127.0.0.1:33483, datanodeUuid=17ec2018-212d-437d-bacb-09db82eca4a1, infoPort=46353, infoSecurePort=0, ipcPort=41503, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:57,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd8a0e08be1c81b92 with lease ID 0x9fd94f6d02e21e5f: Processing first storage report for DS-2ecb5ef2-1470-4505-bf2f-88a5946602e7 from datanode DatanodeRegistration(127.0.0.1:33483, datanodeUuid=17ec2018-212d-437d-bacb-09db82eca4a1, infoPort=46353, infoSecurePort=0, ipcPort=41503, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124) 2024-11-13T18:32:57,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd8a0e08be1c81b92 with lease ID 0x9fd94f6d02e21e5f: from storage DS-2ecb5ef2-1470-4505-bf2f-88a5946602e7 node DatanodeRegistration(127.0.0.1:33483, datanodeUuid=17ec2018-212d-437d-bacb-09db82eca4a1, infoPort=46353, infoSecurePort=0, ipcPort=41503, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:57,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a7cb65f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/java.io.tmpdir/jetty-localhost-32979-hadoop-hdfs-3_4_1-tests_jar-_-any-15304895014858536902/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:32:57,896 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@49d720be{HTTP/1.1, (http/1.1)}{localhost:32979} 2024-11-13T18:32:57,896 INFO [Time-limited test {}] server.Server(415): Started @153947ms 2024-11-13T18:32:57,898 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T18:32:58,011 WARN [Thread-1221 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data3/current/BP-540785503-172.17.0.3-1731522777124/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:58,014 WARN [Thread-1222 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data4/current/BP-540785503-172.17.0.3-1731522777124/current, will proceed with Du for space computation calculation, 2024-11-13T18:32:58,038 WARN [Thread-1210 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:32:58,041 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x35f7150f0aa4fa82 with lease ID 0x9fd94f6d02e21e60: Processing first storage report for DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1 from datanode DatanodeRegistration(127.0.0.1:40439, datanodeUuid=fe459b16-77e9-4868-b359-df1876012c7c, infoPort=33655, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124) 2024-11-13T18:32:58,041 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35f7150f0aa4fa82 with lease ID 0x9fd94f6d02e21e60: from storage DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1 node DatanodeRegistration(127.0.0.1:40439, datanodeUuid=fe459b16-77e9-4868-b359-df1876012c7c, infoPort=33655, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:32:58,041 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x35f7150f0aa4fa82 with lease ID 0x9fd94f6d02e21e60: Processing first storage report for DS-049abb32-1b3b-4e15-bc8f-81c63f5806cf from datanode DatanodeRegistration(127.0.0.1:40439, datanodeUuid=fe459b16-77e9-4868-b359-df1876012c7c, infoPort=33655, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124) 2024-11-13T18:32:58,041 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x35f7150f0aa4fa82 with lease ID 0x9fd94f6d02e21e60: from storage DS-049abb32-1b3b-4e15-bc8f-81c63f5806cf node DatanodeRegistration(127.0.0.1:40439, datanodeUuid=fe459b16-77e9-4868-b359-df1876012c7c, infoPort=33655, infoSecurePort=0, ipcPort=45735, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:32:58,141 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351 2024-11-13T18:32:58,144 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/zookeeper_0, clientPort=62512, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T18:32:58,145 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62512 2024-11-13T18:32:58,146 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:58,147 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:58,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40439 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:32:58,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33483 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:32:58,162 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285 with version=8 2024-11-13T18:32:58,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/hbase-staging 2024-11-13T18:32:58,164 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:32:58,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:58,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:58,164 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:32:58,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:58,164 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:32:58,164 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T18:32:58,165 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:32:58,166 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33957 2024-11-13T18:32:58,167 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33957 connecting to ZooKeeper ensemble=127.0.0.1:62512 2024-11-13T18:32:58,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:339570x0, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:32:58,174 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33957-0x100ed5fc8c90000 connected 2024-11-13T18:32:58,199 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:58,201 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:58,205 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:58,205 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285, hbase.cluster.distributed=false 2024-11-13T18:32:58,206 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:32:58,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33957 2024-11-13T18:32:58,210 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33957 2024-11-13T18:32:58,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33957 2024-11-13T18:32:58,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33957 2024-11-13T18:32:58,211 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33957 2024-11-13T18:32:58,234 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:32:58,234 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:58,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:58,235 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:32:58,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:32:58,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:32:58,235 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T18:32:58,235 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:32:58,236 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38457 2024-11-13T18:32:58,237 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38457 connecting to ZooKeeper ensemble=127.0.0.1:62512 2024-11-13T18:32:58,238 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:58,240 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:58,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384570x0, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:32:58,244 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38457-0x100ed5fc8c90001 connected 2024-11-13T18:32:58,244 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:32:58,245 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T18:32:58,246 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T18:32:58,246 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T18:32:58,248 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:32:58,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38457 2024-11-13T18:32:58,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38457 2024-11-13T18:32:58,249 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38457 2024-11-13T18:32:58,249 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38457 2024-11-13T18:32:58,249 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38457 
2024-11-13T18:32:58,261 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39e84130bbc9:33957 2024-11-13T18:32:58,262 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39e84130bbc9,33957,1731522778164 2024-11-13T18:32:58,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:58,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:58,264 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39e84130bbc9,33957,1731522778164 2024-11-13T18:32:58,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T18:32:58,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,267 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T18:32:58,267 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39e84130bbc9,33957,1731522778164 from backup master directory 2024-11-13T18:32:58,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39e84130bbc9,33957,1731522778164 2024-11-13T18:32:58,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:58,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:32:58,269 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T18:32:58,269 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39e84130bbc9,33957,1731522778164 2024-11-13T18:32:58,275 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/hbase.id] with ID: 9b64a11e-7658-44a7-b09d-2e99cb5500bf 2024-11-13T18:32:58,275 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/.tmp/hbase.id 2024-11-13T18:32:58,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33483 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:32:58,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40439 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:32:58,288 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/.tmp/hbase.id]:[hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/hbase.id] 2024-11-13T18:32:58,302 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:58,302 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T18:32:58,304 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-13T18:32:58,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33483 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:32:58,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40439 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:32:58,314 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:32:58,315 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T18:32:58,315 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:58,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40439 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:32:58,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33483 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:32:58,324 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store 2024-11-13T18:32:58,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33483 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:32:58,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40439 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:32:58,331 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:58,331 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:32:58,331 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:58,332 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:58,332 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:32:58,332 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:32:58,332 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:32:58,332 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522778331Disabling compacts and flushes for region at 1731522778331Disabling writes for close at 1731522778332 (+1 ms)Writing region close event to WAL at 1731522778332Closed at 1731522778332 2024-11-13T18:32:58,333 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/.initializing 2024-11-13T18:32:58,333 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164 2024-11-13T18:32:58,336 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C33957%2C1731522778164, suffix=, logDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164, archiveDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/oldWALs, maxLogs=10 2024-11-13T18:32:58,337 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C33957%2C1731522778164.1731522778337 2024-11-13T18:32:58,346 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522778337 2024-11-13T18:32:58,349 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46353:46353),(127.0.0.1/127.0.0.1:33655:33655)] 2024-11-13T18:32:58,350 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:32:58,350 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:58,350 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,350 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T18:32:58,353 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:58,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:58,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T18:32:58,356 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:58,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:58,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T18:32:58,358 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:58,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:58,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T18:32:58,363 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:58,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:58,364 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,364 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,365 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,367 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,367 DEBUG [master/39e84130bbc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,368 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T18:32:58,369 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:32:58,372 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:32:58,373 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764049, jitterRate=-0.028462350368499756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T18:32:58,374 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731522778350Initializing all the Stores at 1731522778351 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522778351Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522778351Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522778351Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522778351Cleaning up temporary data from old regions at 1731522778367 (+16 ms)Region opened successfully at 1731522778374 (+7 ms) 2024-11-13T18:32:58,377 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T18:32:58,383 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@415a0e22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:32:58,384 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T18:32:58,384 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T18:32:58,385 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T18:32:58,385 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T18:32:58,385 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T18:32:58,386 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T18:32:58,386 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T18:32:58,408 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T18:32:58,409 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T18:32:58,411 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T18:32:58,412 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T18:32:58,413 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T18:32:58,414 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T18:32:58,415 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T18:32:58,416 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T18:32:58,418 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T18:32:58,419 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T18:32:58,420 DEBUG 
[master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T18:32:58,423 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T18:32:58,424 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T18:32:58,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:58,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:32:58,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,427 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39e84130bbc9,33957,1731522778164, sessionid=0x100ed5fc8c90000, setting cluster-up flag (Was=false) 2024-11-13T18:32:58,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,431 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,436 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T18:32:58,438 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,33957,1731522778164 2024-11-13T18:32:58,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,447 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T18:32:58,448 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,33957,1731522778164 2024-11-13T18:32:58,454 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T18:32:58,456 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:58,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T18:32:58,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T18:32:58,457 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39e84130bbc9,33957,1731522778164 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T18:32:58,460 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:58,460 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:58,460 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:58,460 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:32:58,460 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39e84130bbc9:0, corePoolSize=10, maxPoolSize=10 2024-11-13T18:32:58,461 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,461 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:32:58,461 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T18:32:58,463 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:58,463 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T18:32:58,464 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731522808464 2024-11-13T18:32:58,464 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T18:32:58,464 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T18:32:58,464 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T18:32:58,464 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T18:32:58,464 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T18:32:58,464 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T18:32:58,464 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:58,464 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
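The LogsCleaner entry just above is a ScheduledChore registered with the master's ChoreService (period=600000 ms). As a rough orientation only, and assuming an HBase 2.x/3.x client on the classpath, a chore can be defined and scheduled with the public API roughly like this; the chore name "demo-cleaner", the pool prefix "demo" and the body are illustrative, not anything this cluster runs:

  import org.apache.hadoop.hbase.ChoreService;
  import org.apache.hadoop.hbase.ScheduledChore;
  import org.apache.hadoop.hbase.Stoppable;

  public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
      Stoppable stopper = new Stoppable() {            // stop signal consulted by the chore framework
        private volatile boolean stopped;
        @Override public void stop(String why) { stopped = true; }
        @Override public boolean isStopped() { return stopped; }
      };
      // Periodic task; the period is in milliseconds by default, as in the LogsCleaner entry above.
      ScheduledChore chore = new ScheduledChore("demo-cleaner", stopper, 600_000) {
        @Override protected void chore() {
          System.out.println("cleaner pass");          // the real cleaners delete old WALs/HFiles here
        }
      };
      ChoreService service = new ChoreService("demo"); // thread pool that runs registered chores
      service.scheduleChore(chore);
      Thread.sleep(1_000);
      service.shutdown();
    }
  }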
2024-11-13T18:32:58,464 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T18:32:58,465 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T18:32:58,465 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T18:32:58,465 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T18:32:58,466 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T18:32:58,466 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T18:32:58,466 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522778466,5,FailOnTimeoutGroup] 2024-11-13T18:32:58,469 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522778466,5,FailOnTimeoutGroup] 2024-11-13T18:32:58,469 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,469 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
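The descriptor printed above (families info, ns, rep_barrier and table, with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching and small block sizes) is built internally for hbase:meta. For orientation only, a comparable family definition for an ordinary table can be assembled with the public client API along these lines; the table name "demo" and the single family are assumptions, not anything this cluster created:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.regionserver.BloomType;
  import org.apache.hadoop.hbase.util.Bytes;

  public class MetaLikeDescriptorSketch {
    public static void main(String[] args) {
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(3)                                     // VERSIONS => '3'
              .setInMemory(true)                                     // IN_MEMORY => 'true'
              .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8192 B (8KB)
              .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
              .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
              .build())
          .build();
      System.out.println(td);
    }
  }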
2024-11-13T18:32:58,469 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,469 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40439 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:32:58,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33483 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:32:58,481 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T18:32:58,481 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285 2024-11-13T18:32:58,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40439 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:32:58,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33483 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:32:58,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:58,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:32:58,495 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:32:58,495 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:58,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:58,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:32:58,497 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:32:58,497 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:58,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:58,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:32:58,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; 
major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:32:58,500 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:58,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:58,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:32:58,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:32:58,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:58,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:58,502 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:32:58,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740 2024-11-13T18:32:58,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740 2024-11-13T18:32:58,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:32:58,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:32:58,506 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
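The CompactionConfiguration entries above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2) echo standard store-compaction settings. As a sketch of where such values come from, and assuming the stock HBase property names rather than anything specific to this test, they can be set on a Configuration before the cluster starts:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CompactionConfSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Values below mirror the log entries above; the keys are the usual HBase compaction properties.
      conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
      conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
      conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
      conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // selection ratio
      System.out.println(conf.get("hbase.hstore.compaction.ratio"));
    }
  }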
2024-11-13T18:32:58,507 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:32:58,514 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:32:58,514 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716099, jitterRate=-0.08943350613117218}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:32:58,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731522778491Initializing all the Stores at 1731522778492 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522778492Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522778492Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522778492Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522778492Cleaning up temporary data from old regions at 1731522778505 (+13 ms)Region opened successfully at 1731522778515 (+10 ms) 2024-11-13T18:32:58,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:32:58,515 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:32:58,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:32:58,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:32:58,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:32:58,516 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:32:58,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522778515Disabling compacts and flushes for region at 1731522778515Disabling writes for close at 1731522778515Writing region 
close event to WAL at 1731522778516 (+1 ms)Closed at 1731522778516 2024-11-13T18:32:58,517 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:58,517 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T18:32:58,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T18:32:58,519 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:32:58,520 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T18:32:58,552 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(746): ClusterId : 9b64a11e-7658-44a7-b09d-2e99cb5500bf 2024-11-13T18:32:58,552 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T18:32:58,555 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T18:32:58,555 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T18:32:58,557 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T18:32:58,558 DEBUG [RS:0;39e84130bbc9:38457 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502bcdb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:32:58,584 DEBUG [RS:0;39e84130bbc9:38457 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39e84130bbc9:38457 2024-11-13T18:32:58,584 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T18:32:58,584 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T18:32:58,584 DEBUG [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T18:32:58,585 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(2659): reportForDuty to master=39e84130bbc9,33957,1731522778164 with port=38457, startcode=1731522778234 2024-11-13T18:32:58,585 DEBUG [RS:0;39e84130bbc9:38457 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T18:32:58,588 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60619, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T18:32:58,589 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33957 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39e84130bbc9,38457,1731522778234 2024-11-13T18:32:58,589 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33957 {}] master.ServerManager(517): Registering regionserver=39e84130bbc9,38457,1731522778234 2024-11-13T18:32:58,591 DEBUG [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285 2024-11-13T18:32:58,591 DEBUG [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35003 2024-11-13T18:32:58,591 DEBUG [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T18:32:58,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:32:58,593 DEBUG [RS:0;39e84130bbc9:38457 {}] zookeeper.ZKUtil(111): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39e84130bbc9,38457,1731522778234 2024-11-13T18:32:58,593 WARN [RS:0;39e84130bbc9:38457 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:32:58,594 INFO [RS:0;39e84130bbc9:38457 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:58,594 DEBUG [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234 2024-11-13T18:32:58,598 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39e84130bbc9,38457,1731522778234] 2024-11-13T18:32:58,609 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T18:32:58,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:32:58,613 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T18:32:58,613 INFO [RS:0;39e84130bbc9:38457 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T18:32:58,613 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,614 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T18:32:58,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:32:58,617 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T18:32:58,618 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
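The two WARN stack traces above come from RecoverLeaseFSUtils probing whether an old WAL file is already closed; the probe is made reflectively and fails here because the underlying DFSClient has already been shut down ("Filesystem closed"). A minimal sketch of that kind of reflective probe, written as an illustration rather than the project's actual code:

  import java.lang.reflect.InvocationTargetException;
  import java.lang.reflect.Method;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public final class IsFileClosedProbe {
    // Returns true only if the filesystem exposes isFileClosed(Path) and reports the file closed.
    static boolean isFileClosed(FileSystem fs, Path path) {
      try {
        Method m = fs.getClass().getMethod("isFileClosed", Path.class); // present on DistributedFileSystem
        return (Boolean) m.invoke(fs, path);
      } catch (NoSuchMethodException e) {
        return false;                 // filesystem has no such probe; caller falls back to lease recovery
      } catch (IllegalAccessException | InvocationTargetException e) {
        // A shut-down DFSClient throws IOException("Filesystem closed"), which surfaces here
        // wrapped in an InvocationTargetException, matching the WARN entries above.
        return false;
      }
    }
  }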
2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:32:58,618 DEBUG [RS:0;39e84130bbc9:38457 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:32:58,620 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,620 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,620 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,620 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
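Each ExecutorService entry above creates a small fixed-size pool per operation type (for example RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1, or RS_SNAPSHOT_OPERATIONS with 3). A plain JDK equivalent of such a named fixed-size pool, shown only to make the corePoolSize/maxPoolSize pairs concrete; the thread-name prefix is made up:

  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.ThreadFactory;
  import java.util.concurrent.atomic.AtomicInteger;

  public class NamedPoolSketch {
    public static void main(String[] args) {
      ThreadFactory namedThreads = new ThreadFactory() {
        private final AtomicInteger seq = new AtomicInteger();
        @Override public Thread newThread(Runnable r) {
          // Loosely mirrors the "<name>-regionserver/<host>:0" naming pattern seen above.
          return new Thread(r, "RS_OPEN_REGION-demo-" + seq.getAndIncrement());
        }
      };
      // corePoolSize == maxPoolSize == 1, like the RS_OPEN_REGION executor above.
      ExecutorService pool = Executors.newFixedThreadPool(1, namedThreads);
      pool.submit(() -> System.out.println(Thread.currentThread().getName()));
      pool.shutdown();
    }
  }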
2024-11-13T18:32:58,620 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,620 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,38457,1731522778234-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:32:58,644 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T18:32:58,645 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,38457,1731522778234-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,645 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,645 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.Replication(171): 39e84130bbc9,38457,1731522778234 started 2024-11-13T18:32:58,667 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:58,668 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(1482): Serving as 39e84130bbc9,38457,1731522778234, RpcServer on 39e84130bbc9/172.17.0.3:38457, sessionid=0x100ed5fc8c90001 2024-11-13T18:32:58,668 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T18:32:58,668 DEBUG [RS:0;39e84130bbc9:38457 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39e84130bbc9,38457,1731522778234 2024-11-13T18:32:58,668 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,38457,1731522778234' 2024-11-13T18:32:58,668 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T18:32:58,669 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T18:32:58,669 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T18:32:58,669 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T18:32:58,669 DEBUG [RS:0;39e84130bbc9:38457 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39e84130bbc9,38457,1731522778234 2024-11-13T18:32:58,669 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,38457,1731522778234' 2024-11-13T18:32:58,669 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T18:32:58,671 WARN [39e84130bbc9:33957 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
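The ZKProcedureMemberRpcs lines show the region server watching the .../acquired and .../abort znodes for new procedures. A bare-bones sketch of that watch pattern with the plain ZooKeeper client; the connect string and znode path below are taken from the log, everything else is illustrative:

  import java.util.List;
  import org.apache.zookeeper.WatchedEvent;
  import org.apache.zookeeper.Watcher;
  import org.apache.zookeeper.ZooKeeper;

  public class ProcedureWatchSketch {
    public static void main(String[] args) throws Exception {
      ZooKeeper zk = new ZooKeeper("127.0.0.1:62512", 30_000, event -> { });
      Watcher onChange = (WatchedEvent event) -> {
        // NodeChildrenChanged fires when the coordinator creates a new procedure znode.
        System.out.println("event: " + event.getType() + " on " + event.getPath());
      };
      // Re-registering the watch after each trigger is the caller's job; shown once here.
      List<String> pending = zk.getChildren("/hbase/flush-table-proc/acquired", onChange);
      System.out.println("pending procedures: " + pending);
      zk.close();
    }
  }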
2024-11-13T18:32:58,673 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T18:32:58,677 DEBUG [RS:0;39e84130bbc9:38457 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T18:32:58,677 INFO [RS:0;39e84130bbc9:38457 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T18:32:58,677 INFO [RS:0;39e84130bbc9:38457 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T18:32:58,780 INFO [RS:0;39e84130bbc9:38457 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C38457%2C1731522778234, suffix=, logDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234, archiveDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/oldWALs, maxLogs=32 2024-11-13T18:32:58,781 INFO [RS:0;39e84130bbc9:38457 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C38457%2C1731522778234.1731522778781 2024-11-13T18:32:58,819 INFO [RS:0;39e84130bbc9:38457 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 2024-11-13T18:32:58,821 DEBUG [RS:0;39e84130bbc9:38457 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33655:33655),(127.0.0.1/127.0.0.1:46353:46353)] 2024-11-13T18:32:58,921 DEBUG [39e84130bbc9:33957 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T18:32:58,922 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39e84130bbc9,38457,1731522778234 2024-11-13T18:32:58,924 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,38457,1731522778234, state=OPENING 2024-11-13T18:32:58,926 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T18:32:58,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:32:58,928 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:58,928 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:58,928 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:32:58,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,38457,1731522778234}] 2024-11-13T18:32:59,086 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T18:32:59,109 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47341, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T18:32:59,115 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T18:32:59,115 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:32:59,132 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C38457%2C1731522778234.meta, suffix=.meta, logDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234, archiveDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/oldWALs, maxLogs=32 2024-11-13T18:32:59,133 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta 2024-11-13T18:32:59,184 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta 2024-11-13T18:32:59,189 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33655:33655),(127.0.0.1/127.0.0.1:46353:46353)] 2024-11-13T18:32:59,190 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:32:59,191 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T18:32:59,191 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T18:32:59,191 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T18:32:59,191 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T18:32:59,191 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:59,191 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T18:32:59,191 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T18:32:59,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:32:59,195 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:32:59,195 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:59,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:59,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:32:59,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:32:59,198 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:59,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:59,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:32:59,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:32:59,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:59,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:32:59,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:32:59,205 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:32:59,205 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:59,206 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T18:32:59,206 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:32:59,207 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740 2024-11-13T18:32:59,208 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740 2024-11-13T18:32:59,210 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:32:59,210 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:32:59,211 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:32:59,213 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:32:59,214 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874754, jitterRate=0.11230792105197906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:32:59,214 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T18:32:59,215 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731522779192Writing region info on filesystem at 1731522779192Initializing all the Stores at 1731522779193 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522779193Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522779193Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522779193Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522779193Cleaning up temporary data from old regions at 1731522779210 (+17 ms)Running coprocessor post-open hooks at 1731522779214 (+4 ms)Region opened successfully at 1731522779215 (+1 ms) 2024-11-13T18:32:59,216 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731522779085 2024-11-13T18:32:59,219 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T18:32:59,219 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T18:32:59,220 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,38457,1731522778234 2024-11-13T18:32:59,222 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,38457,1731522778234, state=OPEN 2024-11-13T18:32:59,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:32:59,228 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:59,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:32:59,228 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:32:59,228 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39e84130bbc9,38457,1731522778234 2024-11-13T18:32:59,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T18:32:59,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,38457,1731522778234 in 300 msec 2024-11-13T18:32:59,237 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T18:32:59,237 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 714 msec 2024-11-13T18:32:59,238 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:32:59,238 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T18:32:59,240 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:32:59,240 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,38457,1731522778234, seqNum=-1] 2024-11-13T18:32:59,241 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:32:59,242 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51185, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:32:59,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 793 msec 2024-11-13T18:32:59,250 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731522779250, completionTime=-1 2024-11-13T18:32:59,250 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T18:32:59,250 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T18:32:59,252 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T18:32:59,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731522839252 2024-11-13T18:32:59,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731522899253 2024-11-13T18:32:59,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T18:32:59,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,33957,1731522778164-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:59,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,33957,1731522778164-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:59,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,33957,1731522778164-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:59,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39e84130bbc9:33957, period=300000, unit=MILLISECONDS is enabled. 
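
The InitMetaProcedure entries above show the master creating the 'default' and 'hbase' namespaces and then waiting for region servers to report in. As a rough, illustrative sketch only (not taken from the test code), the same namespaces could be confirmed from a client with the standard HBase Admin API; the class name and the assumption that an hbase-site.xml pointing at this quorum is on the classpath are both invented here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespaces {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();      // reads hbase-site.xml for the ZK quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After InitMetaProcedure completes, 'default' and 'hbase' should both be listed.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
    }
  }
}
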
2024-11-13T18:32:59,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:59,254 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T18:32:59,256 DEBUG [master/39e84130bbc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T18:32:59,259 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.990sec 2024-11-13T18:32:59,259 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T18:32:59,259 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T18:32:59,259 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T18:32:59,259 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T18:32:59,259 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T18:32:59,259 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,33957,1731522778164-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:32:59,259 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,33957,1731522778164-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T18:32:59,264 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T18:32:59,264 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T18:32:59,264 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,33957,1731522778164-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
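
Each ChoreService line above registers a ScheduledChore with a name, a period and a time unit (ClusterStatusChore every minute, CatalogJanitor every five minutes, HbckChore hourly, and so on). A minimal sketch of that pattern, assuming the public ScheduledChore/ChoreService API, with the chore name and the 60-second period invented for illustration:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  // Minimal Stoppable; real servers pass themselves so chores stop with the process.
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) {
    Stoppable stopper = new SimpleStopper();
    // Period is taken as milliseconds here, matching the unit=MILLISECONDS entries above.
    ScheduledChore demo = new ScheduledChore("demo-chore", stopper, 60_000) {
      @Override protected void chore() {
        System.out.println("chore fired");
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(demo);   // logged by ChoreService as "... name=demo-chore ... is enabled."
    // service.shutdown() when done.
  }
}
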
2024-11-13T18:32:59,355 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a1758ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:32:59,355 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39e84130bbc9,33957,-1 for getting cluster id 2024-11-13T18:32:59,355 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T18:32:59,357 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9b64a11e-7658-44a7-b09d-2e99cb5500bf' 2024-11-13T18:32:59,358 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T18:32:59,358 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9b64a11e-7658-44a7-b09d-2e99cb5500bf" 2024-11-13T18:32:59,358 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b0d13f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:32:59,358 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39e84130bbc9,33957,-1] 2024-11-13T18:32:59,359 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T18:32:59,359 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:32:59,361 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43780, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T18:32:59,362 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27a17641, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:32:59,362 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:32:59,365 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,38457,1731522778234, seqNum=-1] 2024-11-13T18:32:59,366 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:32:59,374 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40078, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:32:59,377 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39e84130bbc9,33957,1731522778164 2024-11-13T18:32:59,377 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:32:59,381 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T18:32:59,381 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-13T18:32:59,381 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-13T18:32:59,382 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T18:32:59,383 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 39e84130bbc9,33957,1731522778164 2024-11-13T18:32:59,383 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@36588eb 2024-11-13T18:32:59,384 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T18:32:59,395 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43796, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T18:32:59,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33957 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T18:32:59,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33957 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
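
The two TableDescriptorChecker warnings above are expected for this test: the descriptor deliberately carries a tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes and splits happen quickly enough to exercise log rolling. A hedged sketch of building such a descriptor with the public client API, with only the two size values taken from the warnings and everything else (helper name, surrounding code) illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushTable {
  static void createSmallFlushTable(Admin admin) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        // Both values sit far below the sanity-check thresholds, hence the two WARNs above.
        .setMaxFileSize(786432L)          // MAX_FILESIZE: aggressive region splitting
        .setMemStoreFlushSize(8192L)      // MEMSTORE_FLUSHSIZE: very frequent memstore flushes
        .build();
    admin.createTable(desc);              // surfaces as CreateTableProcedure pid=4 above
  }
}
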
2024-11-13T18:32:59,396 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33957 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:32:59,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33957 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T18:32:59,401 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T18:32:59,401 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:59,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33957 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-13T18:32:59,403 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T18:32:59,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33957 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T18:32:59,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33483 is added to blk_1073741835_1011 (size=395) 2024-11-13T18:32:59,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40439 is added to blk_1073741835_1011 (size=395) 2024-11-13T18:32:59,435 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 09f9c1fb89fb55f5e720c9b8dd00db15, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285 2024-11-13T18:32:59,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40439 is added to blk_1073741836_1012 (size=78) 2024-11-13T18:32:59,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33483 is added to blk_1073741836_1012 (size=78) 2024-11-13T18:32:59,456 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:59,456 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 09f9c1fb89fb55f5e720c9b8dd00db15, disabling compactions & flushes 2024-11-13T18:32:59,456 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:32:59,456 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:32:59,456 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. after waiting 0 ms 2024-11-13T18:32:59,456 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:32:59,456 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:32:59,456 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 09f9c1fb89fb55f5e720c9b8dd00db15: Waiting for close lock at 1731522779456Disabling compacts and flushes for region at 1731522779456Disabling writes for close at 1731522779456Writing region close event to WAL at 1731522779456Closed at 1731522779456 2024-11-13T18:32:59,460 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T18:32:59,461 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731522779461"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731522779461"}]},"ts":"1731522779461"} 2024-11-13T18:32:59,464 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
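
After CREATE_TABLE_ADD_TO_META writes the regioninfo and state columns shown in the Put above, and once the region is assigned a few entries further down, a client can resolve where that region landed. A small sketch using the standard RegionLocator API; the helper name is invented and the Connection is assumed to be an open connection to this mini cluster:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WhereIsMyRegion {
  static void printLocations(Connection conn) throws Exception {
    TableName table = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      // Each location pairs a RegionInfo (encoded name 09f9c1fb... above) with a server name.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
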
2024-11-13T18:32:59,465 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T18:32:59,466 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522779465"}]},"ts":"1731522779465"} 2024-11-13T18:32:59,468 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-13T18:32:59,468 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=09f9c1fb89fb55f5e720c9b8dd00db15, ASSIGN}] 2024-11-13T18:32:59,470 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=09f9c1fb89fb55f5e720c9b8dd00db15, ASSIGN 2024-11-13T18:32:59,471 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=09f9c1fb89fb55f5e720c9b8dd00db15, ASSIGN; state=OFFLINE, location=39e84130bbc9,38457,1731522778234; forceNewPlan=false, retain=false 2024-11-13T18:32:59,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:32:59,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:32:59,622 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=09f9c1fb89fb55f5e720c9b8dd00db15, regionState=OPENING, regionLocation=39e84130bbc9,38457,1731522778234 2024-11-13T18:32:59,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=09f9c1fb89fb55f5e720c9b8dd00db15, ASSIGN because future has completed 2024-11-13T18:32:59,627 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 09f9c1fb89fb55f5e720c9b8dd00db15, server=39e84130bbc9,38457,1731522778234}] 2024-11-13T18:32:59,790 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:32:59,790 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 09f9c1fb89fb55f5e720c9b8dd00db15, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:32:59,790 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,791 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:32:59,791 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,791 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,792 INFO [StoreOpener-09f9c1fb89fb55f5e720c9b8dd00db15-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T18:32:59,793 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T18:32:59,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:32:59,794 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T18:32:59,794 INFO [StoreOpener-09f9c1fb89fb55f5e720c9b8dd00db15-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 09f9c1fb89fb55f5e720c9b8dd00db15 columnFamilyName info 2024-11-13T18:32:59,794 DEBUG [StoreOpener-09f9c1fb89fb55f5e720c9b8dd00db15-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:32:59,795 INFO [StoreOpener-09f9c1fb89fb55f5e720c9b8dd00db15-1 {}] regionserver.HStore(327): Store=09f9c1fb89fb55f5e720c9b8dd00db15/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:32:59,795 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,796 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/default/TestLogRolling-testLogRollOnPipelineRestart/09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,796 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/default/TestLogRolling-testLogRollOnPipelineRestart/09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,797 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,797 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,799 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,803 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/default/TestLogRolling-testLogRollOnPipelineRestart/09f9c1fb89fb55f5e720c9b8dd00db15/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:32:59,803 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 
09f9c1fb89fb55f5e720c9b8dd00db15; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733716, jitterRate=-0.06703272461891174}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T18:32:59,804 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:32:59,804 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 09f9c1fb89fb55f5e720c9b8dd00db15: Running coprocessor pre-open hook at 1731522779791Writing region info on filesystem at 1731522779791Initializing all the Stores at 1731522779792 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522779792Cleaning up temporary data from old regions at 1731522779797 (+5 ms)Running coprocessor post-open hooks at 1731522779804 (+7 ms)Region opened successfully at 1731522779804 2024-11-13T18:32:59,806 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15., pid=6, masterSystemTime=1731522779784 2024-11-13T18:32:59,809 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:32:59,809 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 
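
The recurring Close-WAL-Writer-0 warnings in this section come from RecoverLeaseFSUtils polling HDFS while it tries to recover the lease on WAL files left behind by an earlier mini cluster (note the different NameNode port, 34359, and test-data directory in those paths). Each attempt fails with "Filesystem closed" because the DFSClient behind that cached FileSystem has already been shut down, so the utility simply retries about once per second. The HDFS calls involved are public, so the retry pattern can be sketched roughly as below; this is a simplification of what RecoverLeaseFSUtils actually does, and the URI, path and timeout are placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  /** Poll recoverLease/isFileClosed until the old writer's lease is released or we give up. */
  static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(wal)) {
        return true;                 // lease recovered, last block finalized
      }
      if (dfs.isFileClosed(wal)) {   // the reflective call that fails with "Filesystem closed" above
        return true;
      }
      Thread.sleep(1000L);           // roughly the one-second cadence of the WARNs in this log
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI and WAL path.
    try (FileSystem fs = FileSystem.get(new URI("hdfs://localhost:34359"), conf)) {
      recover((DistributedFileSystem) fs, new Path("/path/to/old.wal"), 60_000L);
    }
  }
}
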
2024-11-13T18:32:59,810 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=09f9c1fb89fb55f5e720c9b8dd00db15, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,38457,1731522778234 2024-11-13T18:32:59,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 09f9c1fb89fb55f5e720c9b8dd00db15, server=39e84130bbc9,38457,1731522778234 because future has completed 2024-11-13T18:32:59,822 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T18:32:59,822 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 09f9c1fb89fb55f5e720c9b8dd00db15, server=39e84130bbc9,38457,1731522778234 in 191 msec 2024-11-13T18:32:59,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T18:32:59,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=09f9c1fb89fb55f5e720c9b8dd00db15, ASSIGN in 354 msec 2024-11-13T18:32:59,827 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T18:32:59,827 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522779827"}]},"ts":"1731522779827"} 2024-11-13T18:32:59,830 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-13T18:32:59,831 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T18:32:59,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 434 msec 2024-11-13T18:33:00,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:00,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:01,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:01,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:02,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:02,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:03,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-13T18:33:03,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
2024-11-13T18:33:04,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994
2024-11-13T18:33:04,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
2024-11-13T18:33:04,694 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-13T18:33:04,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,727 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T18:33:04,732 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-13T18:33:04,732 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-11-13T18:33:05,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994
2024-11-13T18:33:05,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
2024-11-13T18:33:06,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994
2024-11-13T18:33:06,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
2024-11-13T18:33:07,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994
2024-11-13T18:33:07,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
2024-11-13T18:33:08,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994
2024-11-13T18:33:08,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
2024-11-13T18:33:09,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33957 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-13T18:33:09,434 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-13T18:33:09,434 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-13T18:33:09,438 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-13T18:33:09,439 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15.
2024-11-13T18:33:09,443 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15., hostname=39e84130bbc9,38457,1731522778234, seqNum=2]
2024-11-13T18:33:09,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994
2024-11-13T18:33:09,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
2024-11-13T18:33:09,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-13T18:33:09,793 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-11-13T18:33:10,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994
2024-11-13T18:33:10,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
2024-11-13T18:33:11,446 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781
2024-11-13T18:33:11,447 WARN [ResponseProcessor for block BP-540785503-172.17.0.3-1731522777124:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-540785503-172.17.0.3-1731522777124:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-13T18:33:11,447 WARN [ResponseProcessor for block BP-540785503-172.17.0.3-1731522777124:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-540785503-172.17.0.3-1731522777124:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:11,447 WARN [DataStreamer for file /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 block BP-540785503-172.17.0.3-1731522777124:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-540785503-172.17.0.3-1731522777124:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40439,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK], DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40439,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]) is bad. 2024-11-13T18:33:11,447 WARN [DataStreamer for file /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta block BP-540785503-172.17.0.3-1731522777124:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-540785503-172.17.0.3-1731522777124:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40439,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK], DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40439,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]) is bad. 2024-11-13T18:33:11,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335895455_22 at /127.0.0.1:50092 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40439:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50092 dst: /127.0.0.1:40439 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:11,448 WARN [ResponseProcessor for block BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:40439,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:11,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335895455_22 at /127.0.0.1:39376 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33483:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39376 dst: /127.0.0.1:33483 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:33:11,448 WARN [DataStreamer for file /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522778337 block BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK], DatanodeInfoWithStorage[127.0.0.1:40439,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40439,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]) is bad. 2024-11-13T18:33:11,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335895455_22 at /127.0.0.1:50078 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40439:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50078 dst: /127.0.0.1:40439 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:33:11,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335895455_22 at /127.0.0.1:39360 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33483:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39360 dst: /127.0.0.1:33483 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:11,449 WARN [PacketResponder: BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40439] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:33:11,449 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-803805951_22 at /127.0.0.1:39320 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33483:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39320 dst: /127.0.0.1:33483 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:11,450 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-803805951_22 at /127.0.0.1:50050 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40439:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50050 dst: /127.0.0.1:40439 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:11,466 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a7cb65f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:11,466 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49d720be{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:33:11,466 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:33:11,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@586e8021{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:33:11,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4360f0f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,STOPPED} 2024-11-13T18:33:11,469 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:33:11,469 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:33:11,469 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-540785503-172.17.0.3-1731522777124 (Datanode Uuid fe459b16-77e9-4868-b359-df1876012c7c) service to localhost/127.0.0.1:35003
2024-11-13T18:33:11,469 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T18:33:11,469 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data3/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T18:33:11,469 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data4/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T18:33:11,470 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T18:33:11,486 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T18:33:11,489 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T18:33:11,490 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T18:33:11,490 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T18:33:11,491 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-13T18:33:11,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68e23717{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,AVAILABLE}
2024-11-13T18:33:11,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67df601f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T18:33:11,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994
2024-11-13T18:33:11,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
2024-11-13T18:33:11,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71af5be2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/java.io.tmpdir/jetty-localhost-37623-hadoop-hdfs-3_4_1-tests_jar-_-any-3572077066144613194/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T18:33:11,627 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a1e644f{HTTP/1.1, (http/1.1)}{localhost:37623}
2024-11-13T18:33:11,627 INFO [Time-limited test {}] server.Server(415): Started @167677ms
2024-11-13T18:33:11,628 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-13T18:33:11,676 WARN [ResponseProcessor for block BP-540785503-172.17.0.3-1731522777124:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-540785503-172.17.0.3-1731522777124:blk_1073741833_1015
java.io.EOFException: Unexpected EOF while trying to read response from server
at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-13T18:33:11,676 WARN [ResponseProcessor for block BP-540785503-172.17.0.3-1731522777124:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-540785503-172.17.0.3-1731522777124:blk_1073741834_1014
java.io.EOFException: Unexpected EOF while trying to read response from server
at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-13T18:33:11,676 WARN [ResponseProcessor for block BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1013
java.io.EOFException: Unexpected EOF while trying to read response from server
at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-13T18:33:11,677 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335895455_22 at /127.0.0.1:52772 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33483:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52772 dst: /127.0.0.1:33483 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:11,677 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-803805951_22 at /127.0.0.1:52770 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33483:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52770 dst: /127.0.0.1:33483 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:11,678 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335895455_22 at /127.0.0.1:52774 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33483:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52774 dst: /127.0.0.1:33483 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:11,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45628471{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:11,688 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e6129e0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:33:11,688 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:33:11,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33255ae1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:33:11,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74ea1d44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,STOPPED} 2024-11-13T18:33:11,690 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:33:11,690 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:33:11,690 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:33:11,690 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-540785503-172.17.0.3-1731522777124 (Datanode Uuid 17ec2018-212d-437d-bacb-09db82eca4a1) service to localhost/127.0.0.1:35003 2024-11-13T18:33:11,691 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data1/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:11,691 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data2/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:11,691 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:33:11,713 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:33:11,717 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:33:11,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:33:11,719 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:33:11,719 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:33:11,722 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31267d1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:33:11,722 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b9f4dad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:33:11,774 WARN [Thread-1345 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:33:11,779 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc61ee6de012351a with lease ID 0x9fd94f6d02e21e61: from storage DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1 node DatanodeRegistration(127.0.0.1:43891, datanodeUuid=fe459b16-77e9-4868-b359-df1876012c7c, infoPort=38175, infoSecurePort=0, ipcPort=37041, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:33:11,779 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc61ee6de012351a with lease ID 0x9fd94f6d02e21e61: from storage DS-049abb32-1b3b-4e15-bc8f-81c63f5806cf node DatanodeRegistration(127.0.0.1:43891, datanodeUuid=fe459b16-77e9-4868-b359-df1876012c7c, infoPort=38175, infoSecurePort=0, ipcPort=37041, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:33:11,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@48dc7e0c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/java.io.tmpdir/jetty-localhost-39325-hadoop-hdfs-3_4_1-tests_jar-_-any-6679349080544612932/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:11,855 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6beed7e8{HTTP/1.1, (http/1.1)}{localhost:39325} 2024-11-13T18:33:11,855 INFO [Time-limited test {}] server.Server(415): Started @167906ms 2024-11-13T18:33:11,856 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:33:11,963 WARN [Thread-1376 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:33:11,967 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe18f55b4da67a3d with lease ID 0x9fd94f6d02e21e62: from storage DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6 node DatanodeRegistration(127.0.0.1:39793, datanodeUuid=17ec2018-212d-437d-bacb-09db82eca4a1, infoPort=40535, infoSecurePort=0, ipcPort=34771, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:33:11,967 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe18f55b4da67a3d with lease ID 0x9fd94f6d02e21e62: from storage DS-2ecb5ef2-1470-4505-bf2f-88a5946602e7 node DatanodeRegistration(127.0.0.1:39793, datanodeUuid=17ec2018-212d-437d-bacb-09db82eca4a1, infoPort=40535, infoSecurePort=0, ipcPort=34771, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:33:12,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:12,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:12,894 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-13T18:33:12,896 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-13T18:33:12,897 ERROR [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285-prefix:39e84130bbc9,38457,1731522778234 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:33:12,898 WARN [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285-prefix:39e84130bbc9,38457,1731522778234 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:12,898 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C38457%2C1731522778234:(num 1731522778781) roll requested 2024-11-13T18:33:12,898 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C38457%2C1731522778234.1731522792898 2024-11-13T18:33:12,904 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 newFile=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 2024-11-13T18:33:12,905 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:12,905 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:12,905 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:12,905 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:12,905 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:12,905 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 2024-11-13T18:33:12,906 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:33:12,906 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:12,906 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 2024-11-13T18:33:12,906 WARN [IPC Server handler 1 on default port 35003 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-13T18:33:12,907 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 after 1ms 2024-11-13T18:33:12,909 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40535:40535),(127.0.0.1/127.0.0.1:38175:38175)] 2024-11-13T18:33:12,909 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 is not closed yet, will try archiving it next time 2024-11-13T18:33:13,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:13,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:14,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:14,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:14,913 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-13T18:33:15,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:15,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:15,780 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-13T18:33:16,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:16,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:16,907 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 after 4001ms 2024-11-13T18:33:16,917 WARN [ResponseProcessor for block BP-540785503-172.17.0.3-1731522777124:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-540785503-172.17.0.3-1731522777124:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:16,917 WARN [DataStreamer for file /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 block BP-540785503-172.17.0.3-1731522777124:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-540785503-172.17.0.3-1731522777124:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39793,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK], DatanodeInfoWithStorage[127.0.0.1:43891,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39793,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]) is bad. 
2024-11-13T18:33:16,917 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335895455_22 at /127.0.0.1:35548 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39793:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35548 dst: /127.0.0.1:39793 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:16,918 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335895455_22 at /127.0.0.1:49960 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43891:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49960 dst: /127.0.0.1:43891 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:16,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@48dc7e0c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:16,921 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6beed7e8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:33:16,921 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:33:16,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b9f4dad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:33:16,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31267d1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,STOPPED} 2024-11-13T18:33:16,923 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:33:16,923 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:33:16,923 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-540785503-172.17.0.3-1731522777124 (Datanode Uuid 17ec2018-212d-437d-bacb-09db82eca4a1) service to localhost/127.0.0.1:35003 2024-11-13T18:33:16,923 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:33:16,924 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data1/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:16,924 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data2/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:16,924 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:33:16,940 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:33:16,943 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:33:16,946 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:33:16,946 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:33:16,946 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:33:16,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20432799{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:33:16,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2378632b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:33:17,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4eef3a93{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/java.io.tmpdir/jetty-localhost-45371-hadoop-hdfs-3_4_1-tests_jar-_-any-12895358949244354028/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:17,075 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d8da7be{HTTP/1.1, 
(http/1.1)}{localhost:45371} 2024-11-13T18:33:17,075 INFO [Time-limited test {}] server.Server(415): Started @173125ms 2024-11-13T18:33:17,076 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:33:17,096 WARN [ResponseProcessor for block BP-540785503-172.17.0.3-1731522777124:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-540785503-172.17.0.3-1731522777124:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:17,097 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335895455_22 at /127.0.0.1:49984 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43891:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49984 dst: /127.0.0.1:43891 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:33:17,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71af5be2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:17,115 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a1e644f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:33:17,115 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:33:17,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67df601f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:33:17,116 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68e23717{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,STOPPED} 2024-11-13T18:33:17,118 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T18:33:17,118 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:33:17,118 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:33:17,118 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-540785503-172.17.0.3-1731522777124 (Datanode Uuid fe459b16-77e9-4868-b359-df1876012c7c) service to localhost/127.0.0.1:35003 2024-11-13T18:33:17,118 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:33:17,118 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data4/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:17,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data3/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:17,141 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:33:17,145 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:33:17,147 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:33:17,147 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:33:17,147 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:33:17,148 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1decdda3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:33:17,148 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4251f41a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:33:17,181 WARN [Thread-1419 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:33:17,188 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1933f99e7b690ee5 with lease ID 0x9fd94f6d02e21e63: from storage DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6 node DatanodeRegistration(127.0.0.1:32847, datanodeUuid=17ec2018-212d-437d-bacb-09db82eca4a1, infoPort=45643, infoSecurePort=0, ipcPort=33919, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:33:17,188 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1933f99e7b690ee5 with lease ID 0x9fd94f6d02e21e63: from storage DS-2ecb5ef2-1470-4505-bf2f-88a5946602e7 node DatanodeRegistration(127.0.0.1:32847, datanodeUuid=17ec2018-212d-437d-bacb-09db82eca4a1, infoPort=45643, infoSecurePort=0, ipcPort=33919, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:33:17,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22586a60{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/java.io.tmpdir/jetty-localhost-44615-hadoop-hdfs-3_4_1-tests_jar-_-any-781840299992255768/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:17,315 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5684bce2{HTTP/1.1, (http/1.1)}{localhost:44615} 2024-11-13T18:33:17,315 INFO [Time-limited test {}] server.Server(415): Started @173366ms 2024-11-13T18:33:17,317 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
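[Editor's sketch] The Thread-1419 warning above is the DataNode rejecting a directory-scanner throttle above 1000 ms/sec and silently falling back to -1 (unthrottled). To actually get throttling, the value must stay at or below 1000. A small hedged example of setting it on a Hadoop Configuration; the property name is copied from the log line, the chosen value of 500 is an arbitrary assumption:

    import org.apache.hadoop.conf.Configuration;

    public final class ScannerThrottleSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Values above 1000 ms/sec are ignored and reset to the default of -1,
        // as the DirectoryScanner warning in this log shows.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
      }
    }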
2024-11-13T18:33:17,420 WARN [Thread-1450 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:33:17,423 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b600e08b9068ec6 with lease ID 0x9fd94f6d02e21e64: from storage DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1 node DatanodeRegistration(127.0.0.1:40421, datanodeUuid=fe459b16-77e9-4868-b359-df1876012c7c, infoPort=38525, infoSecurePort=0, ipcPort=34805, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:33:17,423 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b600e08b9068ec6 with lease ID 0x9fd94f6d02e21e64: from storage DS-049abb32-1b3b-4e15-bc8f-81c63f5806cf node DatanodeRegistration(127.0.0.1:40421, datanodeUuid=fe459b16-77e9-4868-b359-df1876012c7c, infoPort=38525, infoSecurePort=0, ipcPort=34805, storageInfo=lv=-57;cid=testClusterID;nsid=267676525;c=1731522777124), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:33:17,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:17,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:18,341 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-13T18:33:18,343 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-13T18:33:18,344 ERROR [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285-prefix:39e84130bbc9,38457,1731522778234 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43891,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
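[Editor's sketch] The "Data Nodes restarted" INFO marks the point where the test brings the stopped DataNodes back before validating row1004; the appendAndSync failure that immediately follows is the stale pipeline to the old DataNode ports, which triggers the WAL roll below. A rough sketch of the restart step against a MiniDFSCluster; the exact calls in TestLogRolling are not shown in this log, so the method names here are the standard MiniDFSCluster test API as best understood, not quotes from the test:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public final class DataNodeRestartSketch {
      // Sketch only: restart all DataNodes and wait until the NameNode reports
      // them live again, roughly what precedes "Data Nodes restarted" above.
      static void restartDataNodes(MiniDFSCluster cluster) throws Exception {
        cluster.restartDataNodes(); // bring every stopped DataNode back up
        cluster.waitActive();       // block until the cluster is healthy
      }
    }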
2024-11-13T18:33:18,345 WARN [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285-prefix:39e84130bbc9,38457,1731522778234 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43891,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:18,345 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C38457%2C1731522778234:(num 1731522792898) roll requested 2024-11-13T18:33:18,345 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C38457%2C1731522778234.1731522798345 2024-11-13T18:33:18,354 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 newFile=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522798345 2024-11-13T18:33:18,355 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:18,355 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:18,355 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:18,355 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:18,355 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:18,355 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522798345 2024-11-13T18:33:18,355 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43891,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:33:18,356 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43891,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:18,356 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 2024-11-13T18:33:18,356 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45643:45643),(127.0.0.1/127.0.0.1:38525:38525)] 2024-11-13T18:33:18,356 WARN [IPC Server handler 4 on default port 35003 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-13T18:33:18,356 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 is not closed yet, will try archiving it next time 2024-11-13T18:33:18,356 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 after 0ms 2024-11-13T18:33:18,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:18,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:19,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:19,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:20,358 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C38457%2C1731522778234.1731522800358 2024-11-13T18:33:20,365 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522798345 newFile=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 2024-11-13T18:33:20,365 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:20,369 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:20,369 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:20,369 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:20,369 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:20,370 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522798345 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 2024-11-13T18:33:20,371 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38525:38525),(127.0.0.1/127.0.0.1:45643:45643)] 2024-11-13T18:33:20,371 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 is not closed yet, will try archiving it next time 2024-11-13T18:33:20,371 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522798345 is not closed yet, will try archiving it next time 2024-11-13T18:33:20,372 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 2024-11-13T18:33:20,372 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 2024-11-13T18:33:20,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741838_1019 (size=1264) 2024-11-13T18:33:20,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741838_1019 (size=1264) 2024-11-13T18:33:20,373 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 after 1ms 2024-11-13T18:33:20,373 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 2024-11-13T18:33:20,374 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 is not closed yet, will try archiving it next time 2024-11-13T18:33:20,385 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731522779804/Put/vlen=218/seqid=0] 2024-11-13T18:33:20,385 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731522789444/Put/vlen=1045/seqid=0] 2024-11-13T18:33:20,385 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522778781 2024-11-13T18:33:20,385 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 2024-11-13T18:33:20,385 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 2024-11-13T18:33:20,386 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 after 1ms 2024-11-13T18:33:20,386 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 2024-11-13T18:33:20,390 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731522792897/Put/vlen=1045/seqid=0] 2024-11-13T18:33:20,390 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731522794914/Put/vlen=1045/seqid=0] 
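[Editor's sketch] The "#3" .. "#6" DEBUG lines are the test replaying each recovered WAL file and dumping its entries (region event, then the row1002..row1004 puts) until it hits EOF. A hedged sketch of that read-back loop; WALFactory.createStreamReader and the entry accessors reflect an assumption about the HBase 3.x reader API, so treat the exact names as illustrative rather than the test's actual code:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.hadoop.hbase.wal.WALFactory;
    import org.apache.hadoop.hbase.wal.WALStreamReader;

    public final class WalDumpSketch {
      /** Dump every edit in a recovered WAL file, roughly what the #N lines above show. */
      static void dump(Configuration conf, Path walPath) throws Exception {
        FileSystem fs = walPath.getFileSystem(conf);
        // Assumed reader factory; older HBase versions expose WALFactory.createReader instead.
        try (WALStreamReader reader = WALFactory.createStreamReader(fs, walPath, conf)) {
          WAL.Entry entry;
          int i = 0;
          while ((entry = reader.next()) != null) { // null signals end of file
            List<Cell> cells = entry.getEdit().getCells();
            System.out.println("#" + (i++) + ": " + cells); // e.g. [row1003/info:/.../Put/...]
          }
        }
      }
    }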
2024-11-13T18:33:20,390 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 2024-11-13T18:33:20,390 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522798345 2024-11-13T18:33:20,390 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522798345 2024-11-13T18:33:20,391 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522798345 after 1ms 2024-11-13T18:33:20,391 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522798345 2024-11-13T18:33:20,394 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731522798344/Put/vlen=1045/seqid=0] 2024-11-13T18:33:20,394 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 2024-11-13T18:33:20,394 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 2024-11-13T18:33:20,395 WARN [IPC Server handler 3 on default port 35003 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-13T18:33:20,395 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 after 1ms 2024-11-13T18:33:20,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:20,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:21,193 WARN [ResponseProcessor for block BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:21,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-803805951_22 at /127.0.0.1:58552 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40421:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58552 dst: /127.0.0.1:40421 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:40421 remote=/127.0.0.1:58552]. Total timeout mills is 60000, 59172 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:21,193 WARN [DataStreamer for file /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 block BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40421,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK], DatanodeInfoWithStorage[127.0.0.1:32847,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40421,DS-078a9c1e-ba32-4f62-9a9f-03676d10c7d1,DISK]) is bad. 2024-11-13T18:33:21,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-803805951_22 at /127.0.0.1:56404 [Receiving block BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:32847:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56404 dst: /127.0.0.1:32847 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T18:33:21,194 WARN [DataStreamer for file /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 block BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:33:21,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741839_1022 (size=85) 2024-11-13T18:33:21,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:21,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:22,188 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-13T18:33:22,358 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522792898 after 4001ms 2024-11-13T18:33:22,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:22,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:23,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:23,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:24,396 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 after 4002ms 2024-11-13T18:33:24,396 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 2024-11-13T18:33:24,400 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 2024-11-13T18:33:24,401 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-13T18:33:24,401 ERROR [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285-prefix:39e84130bbc9,38457,1731522778234.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:24,401 WARN [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285-prefix:39e84130bbc9,38457,1731522778234.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:33:24,402 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C38457%2C1731522778234.meta:.meta(num 1731522779133) roll requested 2024-11-13T18:33:24,402 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C38457%2C1731522778234.meta.1731522804402.meta 2024-11-13T18:33:24,411 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,411 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,411 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,411 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,411 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,412 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522804402.meta 2024-11-13T18:33:24,412 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:24,412 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:33:24,412 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta 2024-11-13T18:33:24,413 WARN [IPC Server handler 1 on default port 35003 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-13T18:33:24,413 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta after 1ms 2024-11-13T18:33:24,421 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45643:45643),(127.0.0.1/127.0.0.1:38525:38525)] 2024-11-13T18:33:24,421 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta is not closed yet, will try archiving it next time 2024-11-13T18:33:24,445 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/.tmp/info/36719df645f04ef5babf795076b614ca is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15./info:regioninfo/1731522779810/Put/seqid=0 2024-11-13T18:33:24,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741841_1025 (size=7125) 2024-11-13T18:33:24,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741841_1025 (size=7125) 2024-11-13T18:33:24,455 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/.tmp/info/36719df645f04ef5babf795076b614ca 2024-11-13T18:33:24,488 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/.tmp/ns/9a63d26cf18c4f9ca87c0313d20c58a2 is 43, key is default/ns:d/1731522779243/Put/seqid=0 2024-11-13T18:33:24,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741842_1026 (size=5153) 2024-11-13T18:33:24,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741842_1026 (size=5153) 2024-11-13T18:33:24,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:24,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:24,901 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/.tmp/ns/9a63d26cf18c4f9ca87c0313d20c58a2 2024-11-13T18:33:24,931 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/.tmp/table/6b6f3cb73cf94ac6a6e20631d363191c is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731522779827/Put/seqid=0 2024-11-13T18:33:24,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741843_1027 (size=5438) 2024-11-13T18:33:24,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741843_1027 (size=5438) 2024-11-13T18:33:24,945 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/.tmp/table/6b6f3cb73cf94ac6a6e20631d363191c 2024-11-13T18:33:24,958 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/.tmp/info/36719df645f04ef5babf795076b614ca as hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/info/36719df645f04ef5babf795076b614ca 2024-11-13T18:33:24,964 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/info/36719df645f04ef5babf795076b614ca, entries=10, sequenceid=11, filesize=7.0 K 2024-11-13T18:33:24,965 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/.tmp/ns/9a63d26cf18c4f9ca87c0313d20c58a2 as hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/ns/9a63d26cf18c4f9ca87c0313d20c58a2 2024-11-13T18:33:24,972 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/ns/9a63d26cf18c4f9ca87c0313d20c58a2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T18:33:24,973 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/.tmp/table/6b6f3cb73cf94ac6a6e20631d363191c as hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/table/6b6f3cb73cf94ac6a6e20631d363191c 2024-11-13T18:33:24,981 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/table/6b6f3cb73cf94ac6a6e20631d363191c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T18:33:24,982 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 581ms, sequenceid=11, compaction requested=false 2024-11-13T18:33:24,982 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T18:33:24,983 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 09f9c1fb89fb55f5e720c9b8dd00db15 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-13T18:33:24,983 ERROR [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285-prefix:39e84130bbc9,38457,1731522778234 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:24,983 WARN [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285-prefix:39e84130bbc9,38457,1731522778234 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:24,984 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C38457%2C1731522778234:(num 1731522800358) roll requested 2024-11-13T18:33:24,984 INFO [regionserver/39e84130bbc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C38457%2C1731522778234.1731522804984 2024-11-13T18:33:24,998 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 newFile=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522804984 2024-11-13T18:33:24,998 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,998 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,998 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,998 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,998 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:24,999 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522804984 2024-11-13T18:33:24,999 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:24,999 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-540785503-172.17.0.3-1731522777124:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:33:25,000 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 2024-11-13T18:33:25,000 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 after 0ms 2024-11-13T18:33:25,008 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.1731522800358 to hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/oldWALs/39e84130bbc9%2C38457%2C1731522778234.1731522800358 2024-11-13T18:33:25,017 DEBUG [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38525:38525),(127.0.0.1/127.0.0.1:45643:45643)] 2024-11-13T18:33:25,040 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/default/TestLogRolling-testLogRollOnPipelineRestart/09f9c1fb89fb55f5e720c9b8dd00db15/.tmp/info/213bfea9d8d2498fba24efd592587fe3 is 1080, key is row1002/info:/1731522789444/Put/seqid=0 2024-11-13T18:33:25,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741845_1029 (size=9270) 2024-11-13T18:33:25,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741845_1029 (size=9270) 2024-11-13T18:33:25,448 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/default/TestLogRolling-testLogRollOnPipelineRestart/09f9c1fb89fb55f5e720c9b8dd00db15/.tmp/info/213bfea9d8d2498fba24efd592587fe3 2024-11-13T18:33:25,463 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/default/TestLogRolling-testLogRollOnPipelineRestart/09f9c1fb89fb55f5e720c9b8dd00db15/.tmp/info/213bfea9d8d2498fba24efd592587fe3 as hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/default/TestLogRolling-testLogRollOnPipelineRestart/09f9c1fb89fb55f5e720c9b8dd00db15/info/213bfea9d8d2498fba24efd592587fe3 2024-11-13T18:33:25,470 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/default/TestLogRolling-testLogRollOnPipelineRestart/09f9c1fb89fb55f5e720c9b8dd00db15/info/213bfea9d8d2498fba24efd592587fe3, entries=4, sequenceid=8, filesize=9.1 K 2024-11-13T18:33:25,471 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 09f9c1fb89fb55f5e720c9b8dd00db15 in 489ms, sequenceid=8, compaction requested=false 2024-11-13T18:33:25,471 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
09f9c1fb89fb55f5e720c9b8dd00db15: 2024-11-13T18:33:25,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T18:33:25,478 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T18:33:25,478 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:33:25,478 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:33:25,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-13T18:33:25,479 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T18:33:25,479 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T18:33:25,479 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1945721467, stopped=false 2024-11-13T18:33:25,479 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39e84130bbc9,33957,1731522778164 2024-11-13T18:33:25,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:33:25,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:25,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:33:25,481 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:33:25,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:25,482 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:33:25,482 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T18:33:25,482 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:33:25,482 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:33:25,482 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:33:25,483 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39e84130bbc9,38457,1731522778234' ***** 2024-11-13T18:33:25,483 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T18:33:25,483 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T18:33:25,483 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(3091): Received CLOSE for 09f9c1fb89fb55f5e720c9b8dd00db15 2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(959): stopping server 39e84130bbc9,38457,1731522778234 2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39e84130bbc9:38457. 2024-11-13T18:33:25,484 DEBUG [RS:0;39e84130bbc9:38457 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:33:25,484 DEBUG [RS:0;39e84130bbc9:38457 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T18:33:25,484 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T18:33:25,485 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T18:33:25,485 DEBUG [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 09f9c1fb89fb55f5e720c9b8dd00db15=TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15.} 2024-11-13T18:33:25,485 DEBUG [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(1351): Waiting on 09f9c1fb89fb55f5e720c9b8dd00db15, 1588230740 2024-11-13T18:33:25,485 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 09f9c1fb89fb55f5e720c9b8dd00db15, disabling compactions & flushes 2024-11-13T18:33:25,485 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:33:25,485 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:33:25,485 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. after waiting 0 ms 2024-11-13T18:33:25,485 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:33:25,485 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:33:25,485 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:33:25,485 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:33:25,486 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:33:25,486 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:33:25,494 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/default/TestLogRolling-testLogRollOnPipelineRestart/09f9c1fb89fb55f5e720c9b8dd00db15/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-13T18:33:25,495 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 
2024-11-13T18:33:25,495 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 09f9c1fb89fb55f5e720c9b8dd00db15: Waiting for close lock at 1731522805485Running coprocessor pre-close hooks at 1731522805485Disabling compacts and flushes for region at 1731522805485Disabling writes for close at 1731522805485Writing region close event to WAL at 1731522805487 (+2 ms)Running coprocessor post-close hooks at 1731522805495 (+8 ms)Closed at 1731522805495 2024-11-13T18:33:25,495 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731522779396.09f9c1fb89fb55f5e720c9b8dd00db15. 2024-11-13T18:33:25,497 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T18:33:25,498 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:33:25,498 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:33:25,498 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522805485Running coprocessor pre-close hooks at 1731522805485Disabling compacts and flushes for region at 1731522805485Disabling writes for close at 1731522805486 (+1 ms)Writing region close event to WAL at 1731522805494 (+8 ms)Running coprocessor post-close hooks at 1731522805498 (+4 ms)Closed at 1731522805498 2024-11-13T18:33:25,498 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T18:33:25,621 INFO [regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T18:33:25,621 INFO [regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T18:33:25,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:25,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:25,685 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(976): stopping server 39e84130bbc9,38457,1731522778234; all regions closed. 2024-11-13T18:33:25,686 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:25,686 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:25,686 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:25,686 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:25,686 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:25,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741840_1023 (size=825) 2024-11-13T18:33:25,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741840_1023 (size=825) 2024-11-13T18:33:26,622 INFO [regionserver/39e84130bbc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:33:26,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:26,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:27,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:27,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:28,140 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T18:33:28,414 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta after 4002ms 2024-11-13T18:33:28,414 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/WALs/39e84130bbc9,38457,1731522778234/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta to hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/oldWALs/39e84130bbc9%2C38457%2C1731522778234.meta.1731522779133.meta 2024-11-13T18:33:28,417 DEBUG [RS:0;39e84130bbc9:38457 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/oldWALs 2024-11-13T18:33:28,417 INFO [RS:0;39e84130bbc9:38457 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C38457%2C1731522778234.meta:.meta(num 1731522804402) 2024-11-13T18:33:28,418 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,418 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,418 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,418 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,418 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741844_1028 (size=1162) 2024-11-13T18:33:28,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741844_1028 (size=1162) 2024-11-13T18:33:28,422 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-13T18:33:28,426 DEBUG [RS:0;39e84130bbc9:38457 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/oldWALs 2024-11-13T18:33:28,426 INFO [RS:0;39e84130bbc9:38457 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C38457%2C1731522778234:(num 1731522804984) 2024-11-13T18:33:28,426 DEBUG [RS:0;39e84130bbc9:38457 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:33:28,426 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:33:28,426 INFO [RS:0;39e84130bbc9:38457 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:33:28,426 INFO [RS:0;39e84130bbc9:38457 {}] hbase.ChoreService(370): Chore service for: regionserver/39e84130bbc9:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T18:33:28,427 INFO [RS:0;39e84130bbc9:38457 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:33:28,427 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T18:33:28,427 INFO [RS:0;39e84130bbc9:38457 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38457 2024-11-13T18:33:28,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39e84130bbc9,38457,1731522778234 2024-11-13T18:33:28,429 INFO [RS:0;39e84130bbc9:38457 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:33:28,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:33:28,431 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39e84130bbc9,38457,1731522778234] 2024-11-13T18:33:28,432 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39e84130bbc9,38457,1731522778234 already deleted, retry=false 2024-11-13T18:33:28,432 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39e84130bbc9,38457,1731522778234 expired; onlineServers=0 2024-11-13T18:33:28,432 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39e84130bbc9,33957,1731522778164' ***** 2024-11-13T18:33:28,432 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T18:33:28,432 INFO [M:0;39e84130bbc9:33957 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:33:28,432 INFO [M:0;39e84130bbc9:33957 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:33:28,432 DEBUG [M:0;39e84130bbc9:33957 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T18:33:28,433 DEBUG [M:0;39e84130bbc9:33957 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T18:33:28,433 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T18:33:28,433 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522778466 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522778466,5,FailOnTimeoutGroup] 2024-11-13T18:33:28,433 INFO [M:0;39e84130bbc9:33957 {}] hbase.ChoreService(370): Chore service for: master/39e84130bbc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T18:33:28,433 INFO [M:0;39e84130bbc9:33957 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:33:28,433 DEBUG [M:0;39e84130bbc9:33957 {}] master.HMaster(1795): Stopping service threads 2024-11-13T18:33:28,433 INFO [M:0;39e84130bbc9:33957 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T18:33:28,433 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522778466 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522778466,5,FailOnTimeoutGroup] 2024-11-13T18:33:28,433 INFO [M:0;39e84130bbc9:33957 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:33:28,433 INFO [M:0;39e84130bbc9:33957 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T18:33:28,433 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T18:33:28,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T18:33:28,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:28,434 DEBUG [M:0;39e84130bbc9:33957 {}] zookeeper.ZKUtil(347): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T18:33:28,434 WARN [M:0;39e84130bbc9:33957 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T18:33:28,435 INFO [M:0;39e84130bbc9:33957 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/.lastflushedseqids 2024-11-13T18:33:28,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741846_1030 (size=120) 2024-11-13T18:33:28,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741846_1030 (size=120) 2024-11-13T18:33:28,445 INFO [M:0;39e84130bbc9:33957 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T18:33:28,446 INFO [M:0;39e84130bbc9:33957 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T18:33:28,446 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:33:28,446 INFO [M:0;39e84130bbc9:33957 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:33:28,446 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:33:28,446 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:33:28,446 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:33:28,446 INFO [M:0;39e84130bbc9:33957 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-13T18:33:28,446 ERROR [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData-prefix:39e84130bbc9,33957,1731522778164 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:28,447 WARN [FSHLog-0-hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData-prefix:39e84130bbc9,33957,1731522778164 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T18:33:28,447 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 39e84130bbc9%2C33957%2C1731522778164:(num 1731522778337) roll requested 2024-11-13T18:33:28,447 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C33957%2C1731522778164.1731522808447 2024-11-13T18:33:28,455 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,455 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,455 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,455 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,455 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,455 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522778337 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522808447 2024-11-13T18:33:28,456 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:28,456 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33483,DS-f179cf7e-3c43-4311-b6a7-172197a3a1e6,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T18:33:28,456 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522778337 2024-11-13T18:33:28,457 WARN [IPC Server handler 0 on default port 35003 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522778337 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-13T18:33:28,457 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522778337 after 1ms 2024-11-13T18:33:28,461 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38525:38525),(127.0.0.1/127.0.0.1:45643:45643)] 2024-11-13T18:33:28,461 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522778337 is not closed yet, will try archiving it next time 2024-11-13T18:33:28,483 DEBUG [M:0;39e84130bbc9:33957 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a2b344cd5744de785a2f8ee4099a0c4 is 82, key is hbase:meta,,1/info:regioninfo/1731522779220/Put/seqid=0 2024-11-13T18:33:28,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741848_1033 (size=5672) 2024-11-13T18:33:28,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741848_1033 (size=5672) 2024-11-13T18:33:28,489 INFO [M:0;39e84130bbc9:33957 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a2b344cd5744de785a2f8ee4099a0c4 2024-11-13T18:33:28,517 DEBUG [M:0;39e84130bbc9:33957 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0616ebf6d37487cbdec6fd7f824ac75 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731522779833/Put/seqid=0 2024-11-13T18:33:28,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741849_1034 (size=6117) 2024-11-13T18:33:28,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741849_1034 (size=6117) 2024-11-13T18:33:28,523 INFO [M:0;39e84130bbc9:33957 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0616ebf6d37487cbdec6fd7f824ac75 2024-11-13T18:33:28,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:33:28,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38457-0x100ed5fc8c90001, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-13T18:33:28,531 INFO [RS:0;39e84130bbc9:38457 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:33:28,531 INFO [RS:0;39e84130bbc9:38457 {}] regionserver.HRegionServer(1031): Exiting; stopping=39e84130bbc9,38457,1731522778234; zookeeper connection closed. 2024-11-13T18:33:28,537 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4d9cebf1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4d9cebf1 2024-11-13T18:33:28,538 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T18:33:28,544 DEBUG [M:0;39e84130bbc9:33957 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9fa6f08235c742c495493517d6f764e4 is 69, key is 39e84130bbc9,38457,1731522778234/rs:state/1731522778589/Put/seqid=0 2024-11-13T18:33:28,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741850_1035 (size=5156) 2024-11-13T18:33:28,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741850_1035 (size=5156) 2024-11-13T18:33:28,550 INFO [M:0;39e84130bbc9:33957 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9fa6f08235c742c495493517d6f764e4 2024-11-13T18:33:28,577 DEBUG [M:0;39e84130bbc9:33957 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95a986e406814befb1dd37ea4b8f7977 is 52, key is load_balancer_on/state:d/1731522779380/Put/seqid=0 2024-11-13T18:33:28,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741851_1036 (size=5056) 2024-11-13T18:33:28,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741851_1036 (size=5056) 2024-11-13T18:33:28,585 INFO [M:0;39e84130bbc9:33957 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95a986e406814befb1dd37ea4b8f7977 2024-11-13T18:33:28,597 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4a2b344cd5744de785a2f8ee4099a0c4 as hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4a2b344cd5744de785a2f8ee4099a0c4 2024-11-13T18:33:28,602 INFO [M:0;39e84130bbc9:33957 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4a2b344cd5744de785a2f8ee4099a0c4, entries=8, sequenceid=56, filesize=5.5 K 2024-11-13T18:33:28,603 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0616ebf6d37487cbdec6fd7f824ac75 as hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0616ebf6d37487cbdec6fd7f824ac75 2024-11-13T18:33:28,609 INFO [M:0;39e84130bbc9:33957 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0616ebf6d37487cbdec6fd7f824ac75, entries=6, sequenceid=56, filesize=6.0 K 2024-11-13T18:33:28,610 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9fa6f08235c742c495493517d6f764e4 as hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9fa6f08235c742c495493517d6f764e4 2024-11-13T18:33:28,615 INFO [M:0;39e84130bbc9:33957 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9fa6f08235c742c495493517d6f764e4, entries=1, sequenceid=56, filesize=5.0 K 2024-11-13T18:33:28,616 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95a986e406814befb1dd37ea4b8f7977 as hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/95a986e406814befb1dd37ea4b8f7977 2024-11-13T18:33:28,621 INFO [M:0;39e84130bbc9:33957 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/95a986e406814befb1dd37ea4b8f7977, entries=1, sequenceid=56, filesize=4.9 K 2024-11-13T18:33:28,622 INFO [M:0;39e84130bbc9:33957 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 176ms, sequenceid=56, compaction requested=false 2024-11-13T18:33:28,626 INFO [M:0;39e84130bbc9:33957 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:33:28,626 DEBUG [M:0;39e84130bbc9:33957 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522808446Disabling compacts and flushes for region at 1731522808446Disabling writes for close at 1731522808446Obtaining lock to block concurrent updates at 1731522808446Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731522808446Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731522808447 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731522808462 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731522808462Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731522808483 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731522808483Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731522808495 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731522808516 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731522808517 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731522808528 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731522808544 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731522808544Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731522808556 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731522808577 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731522808577Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@439b5326: reopening flushed file at 1731522808596 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50544c6b: reopening flushed file at 1731522808602 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@256aae6f: reopening flushed file at 1731522808609 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7643a17: reopening flushed file at 1731522808615 (+6 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 176ms, sequenceid=56, compaction requested=false at 1731522808622 (+7 ms)Writing region close event to WAL at 1731522808626 (+4 ms)Closed at 1731522808626 2024-11-13T18:33:28,626 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,626 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,627 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,627 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,627 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:33:28,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32847 is added to blk_1073741847_1031 (size=757) 2024-11-13T18:33:28,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40421 is added to blk_1073741847_1031 (size=757) 2024-11-13T18:33:28,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:28,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:29,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:29,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:29,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:33:29,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T18:33:29,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T18:33:29,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T18:33:30,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:30,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:30,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:31,037 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T18:33:31,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,039 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,060 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,060 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:31,423 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-13T18:33:31,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:31,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:32,458 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522778337 after 4002ms 2024-11-13T18:33:32,458 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/WALs/39e84130bbc9,33957,1731522778164/39e84130bbc9%2C33957%2C1731522778164.1731522778337 to hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/oldWALs/39e84130bbc9%2C33957%2C1731522778164.1731522778337 2024-11-13T18:33:32,461 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/MasterData/oldWALs/39e84130bbc9%2C33957%2C1731522778164.1731522778337 to hdfs://localhost:35003/user/jenkins/test-data/20c98c78-5ac8-deac-3961-98a3054d0285/oldWALs/39e84130bbc9%2C33957%2C1731522778164.1731522778337$masterlocalwal$ 2024-11-13T18:33:32,461 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T18:33:32,461 INFO [M:0;39e84130bbc9:33957 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T18:33:32,462 INFO [M:0;39e84130bbc9:33957 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33957 2024-11-13T18:33:32,462 INFO [M:0;39e84130bbc9:33957 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:33:32,564 INFO [M:0;39e84130bbc9:33957 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:33:32,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:33:32,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33957-0x100ed5fc8c90000, quorum=127.0.0.1:62512, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:33:32,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22586a60{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:32,567 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5684bce2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:33:32,567 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:33:32,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4251f41a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:33:32,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1decdda3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,STOPPED} 2024-11-13T18:33:32,569 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
2024-11-13T18:33:32,569 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T18:33:32,569 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-540785503-172.17.0.3-1731522777124 (Datanode Uuid fe459b16-77e9-4868-b359-df1876012c7c) service to localhost/127.0.0.1:35003 2024-11-13T18:33:32,569 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:33:32,569 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data3/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:32,569 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data4/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:32,570 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:33:32,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4eef3a93{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:32,572 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d8da7be{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:33:32,572 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:33:32,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2378632b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:33:32,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20432799{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,STOPPED} 2024-11-13T18:33:32,573 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:33:32,573 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:33:32,573 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:33:32,573 WARN [BP-540785503-172.17.0.3-1731522777124 heartbeating to localhost/127.0.0.1:35003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-540785503-172.17.0.3-1731522777124 (Datanode Uuid 17ec2018-212d-437d-bacb-09db82eca4a1) service to localhost/127.0.0.1:35003 2024-11-13T18:33:32,574 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data1/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:32,574 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/cluster_66bd62a7-939a-08c3-f781-f5df031663a0/data/data2/current/BP-540785503-172.17.0.3-1731522777124 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:33:32,574 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:33:32,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@ba4770c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:33:32,580 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4702e786{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:33:32,581 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:33:32,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10a92d53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:33:32,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13fdd007{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir/,STOPPED} 2024-11-13T18:33:32,588 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T18:33:32,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T18:33:32,615 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 156) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35003 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35003 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:35003 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native 
Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35003 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35003 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=379 (was 360) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=1144 (was 1699) 2024-11-13T18:33:32,622 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=379, ProcessCount=11, AvailableMemoryMB=1145 2024-11-13T18:33:32,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T18:33:32,622 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.log.dir so I do NOT create it in target/test-data/397e5369-68d2-f586-597b-429af27482c2 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c473efaf-8f0c-8926-3caf-7a78625ce351/hadoop.tmp.dir so I do NOT create it in target/test-data/397e5369-68d2-f586-597b-429af27482c2 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4, deleteOnExit=true 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/test.cache.data in system properties and HBase conf 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.log.dir in system properties and HBase conf 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T18:33:32,623 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T18:33:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/nfs.dump.dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/java.io.tmpdir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T18:33:32,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T18:33:32,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:32,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:32,637 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:33:32,711 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:33:32,717 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:33:32,718 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:33:32,718 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:33:32,718 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:33:32,719 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:33:32,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62558ec9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:33:32,722 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37ea919c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:33:32,850 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b612817{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/java.io.tmpdir/jetty-localhost-37413-hadoop-hdfs-3_4_1-tests_jar-_-any-8213419796285842083/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:33:32,851 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35dc38db{HTTP/1.1, (http/1.1)}{localhost:37413} 2024-11-13T18:33:32,851 INFO [Time-limited test {}] server.Server(415): Started @188902ms 2024-11-13T18:33:32,864 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:33:32,984 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:33:32,987 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:33:32,987 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:33:32,987 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:33:32,987 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:33:32,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1330929b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:33:32,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fc56883{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:33:33,106 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c020752{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/java.io.tmpdir/jetty-localhost-44429-hadoop-hdfs-3_4_1-tests_jar-_-any-8503859020355200990/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:33,107 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ac52070{HTTP/1.1, (http/1.1)}{localhost:44429} 2024-11-13T18:33:33,107 INFO [Time-limited test {}] server.Server(415): Started @189158ms 2024-11-13T18:33:33,108 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:33:33,192 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:33:33,196 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:33:33,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:33:33,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:33:33,199 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:33:33,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43498b11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:33:33,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22a4ff4e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:33:33,222 WARN [Thread-1644 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/data/data1/current/BP-1180493070-172.17.0.3-1731522812652/current, will proceed with Du for space computation calculation, 2024-11-13T18:33:33,222 WARN [Thread-1645 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/data/data2/current/BP-1180493070-172.17.0.3-1731522812652/current, will proceed with Du for space computation calculation, 2024-11-13T18:33:33,261 WARN [Thread-1623 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:33:33,267 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x949c8b5c9dcdbcdf with lease ID 0x3072f7a80c6529f9: Processing first storage report for DS-37e1d665-f312-4ec6-ab9a-9da2eed6113d from datanode DatanodeRegistration(127.0.0.1:34795, datanodeUuid=b4beaa62-e7fa-4c19-95a2-fd401bd2fc19, infoPort=38703, infoSecurePort=0, ipcPort=41485, storageInfo=lv=-57;cid=testClusterID;nsid=343333489;c=1731522812652) 2024-11-13T18:33:33,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x949c8b5c9dcdbcdf with lease ID 0x3072f7a80c6529f9: from storage DS-37e1d665-f312-4ec6-ab9a-9da2eed6113d node DatanodeRegistration(127.0.0.1:34795, datanodeUuid=b4beaa62-e7fa-4c19-95a2-fd401bd2fc19, infoPort=38703, infoSecurePort=0, ipcPort=41485, storageInfo=lv=-57;cid=testClusterID;nsid=343333489;c=1731522812652), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:33:33,267 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x949c8b5c9dcdbcdf with lease ID 0x3072f7a80c6529f9: Processing first storage report for DS-289c2ede-32db-4b9e-8488-65628ff8a00e from datanode DatanodeRegistration(127.0.0.1:34795, datanodeUuid=b4beaa62-e7fa-4c19-95a2-fd401bd2fc19, infoPort=38703, infoSecurePort=0, ipcPort=41485, storageInfo=lv=-57;cid=testClusterID;nsid=343333489;c=1731522812652) 2024-11-13T18:33:33,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x949c8b5c9dcdbcdf with lease ID 0x3072f7a80c6529f9: from storage DS-289c2ede-32db-4b9e-8488-65628ff8a00e node DatanodeRegistration(127.0.0.1:34795, datanodeUuid=b4beaa62-e7fa-4c19-95a2-fd401bd2fc19, infoPort=38703, infoSecurePort=0, ipcPort=41485, storageInfo=lv=-57;cid=testClusterID;nsid=343333489;c=1731522812652), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:33:33,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b3ac0d9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/java.io.tmpdir/jetty-localhost-34055-hadoop-hdfs-3_4_1-tests_jar-_-any-17723338370800579036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:33:33,346 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@144fb880{HTTP/1.1, (http/1.1)}{localhost:34055} 2024-11-13T18:33:33,346 INFO [Time-limited test {}] server.Server(415): Started @189397ms 2024-11-13T18:33:33,347 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
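The entries above show HBaseTestingUtil bringing up a fresh mini DFS cluster (ZooKeeper data dir, two DataNodes reporting their storages, the embedded Jetty web UIs) for regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling, driven by the StartMiniClusterOption logged at the start of this test (1 master, 1 region server, 2 datanodes, 1 ZK server). As a minimal, hedged sketch of how a test typically invokes this API, assuming the usual JUnit-style scaffolding (the class name and try/finally shape below are illustrative, not taken from this log):

    // Illustrative sketch only: starting/stopping the mini cluster whose
    // startup is being logged here. Method names follow the
    // HBaseTestingUtil / StartMiniClusterOption API referenced in the log.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      public static void main(String[] args) throws Exception {
        // Mirrors the logged option: 1 master, 1 region server, 2 datanodes, 1 ZK server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        TEST_UTIL.startMiniCluster(option);   // brings up mini ZK, mini DFS, then HBase
        try {
          // ... test body would go here ...
        } finally {
          TEST_UTIL.shutdownMiniCluster();    // tears down the cluster and its temp dirs
        }
      }
    }

The deleteOnExit=true data directory and the "STARTING DFS" / Jetty entries above are the runtime trace of exactly this startup sequence.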
2024-11-13T18:33:33,441 WARN [Thread-1670 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/data/data3/current/BP-1180493070-172.17.0.3-1731522812652/current, will proceed with Du for space computation calculation, 2024-11-13T18:33:33,441 WARN [Thread-1671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/data/data4/current/BP-1180493070-172.17.0.3-1731522812652/current, will proceed with Du for space computation calculation, 2024-11-13T18:33:33,464 WARN [Thread-1659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:33:33,467 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbec50bb81bfd592a with lease ID 0x3072f7a80c6529fa: Processing first storage report for DS-ae8bcc35-18f8-4c25-852d-dfbae82678cc from datanode DatanodeRegistration(127.0.0.1:38871, datanodeUuid=7c38f4d0-ea7f-4e80-887b-f0d6c70658bf, infoPort=34759, infoSecurePort=0, ipcPort=45989, storageInfo=lv=-57;cid=testClusterID;nsid=343333489;c=1731522812652) 2024-11-13T18:33:33,467 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbec50bb81bfd592a with lease ID 0x3072f7a80c6529fa: from storage DS-ae8bcc35-18f8-4c25-852d-dfbae82678cc node DatanodeRegistration(127.0.0.1:38871, datanodeUuid=7c38f4d0-ea7f-4e80-887b-f0d6c70658bf, infoPort=34759, infoSecurePort=0, ipcPort=45989, storageInfo=lv=-57;cid=testClusterID;nsid=343333489;c=1731522812652), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:33:33,467 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbec50bb81bfd592a with lease ID 0x3072f7a80c6529fa: Processing first storage report for DS-8930c024-038c-4107-83ab-743353fbb29d from datanode DatanodeRegistration(127.0.0.1:38871, datanodeUuid=7c38f4d0-ea7f-4e80-887b-f0d6c70658bf, infoPort=34759, infoSecurePort=0, ipcPort=45989, storageInfo=lv=-57;cid=testClusterID;nsid=343333489;c=1731522812652) 2024-11-13T18:33:33,467 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbec50bb81bfd592a with lease ID 0x3072f7a80c6529fa: from storage DS-8930c024-038c-4107-83ab-743353fbb29d node DatanodeRegistration(127.0.0.1:38871, datanodeUuid=7c38f4d0-ea7f-4e80-887b-f0d6c70658bf, infoPort=34759, infoSecurePort=0, ipcPort=45989, storageInfo=lv=-57;cid=testClusterID;nsid=343333489;c=1731522812652), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:33:33,488 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2 2024-11-13T18:33:33,491 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/zookeeper_0, clientPort=58025, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T18:33:33,492 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58025 2024-11-13T18:33:33,492 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:33:33,494 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:33:33,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:33:33,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:33:33,504 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0 with version=8 2024-11-13T18:33:33,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/hbase-staging 2024-11-13T18:33:33,507 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:33:33,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:33:33,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:33:33,507 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:33:33,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:33:33,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:33:33,507 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T18:33:33,507 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:33:33,508 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40397 2024-11-13T18:33:33,510 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40397 connecting to ZooKeeper ensemble=127.0.0.1:58025 2024-11-13T18:33:33,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:403970x0, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:33:33,517 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40397-0x100ed6052dc0000 connected 2024-11-13T18:33:33,537 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:33:33,538 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:33:33,540 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:33:33,540 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0, hbase.cluster.distributed=false 2024-11-13T18:33:33,542 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:33:33,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40397 2024-11-13T18:33:33,545 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40397 2024-11-13T18:33:33,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40397 2024-11-13T18:33:33,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40397 2024-11-13T18:33:33,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40397 2024-11-13T18:33:33,569 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:33:33,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:33:33,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:33:33,569 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:33:33,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:33:33,569 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:33:33,570 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T18:33:33,570 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:33:33,570 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35433 2024-11-13T18:33:33,572 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35433 connecting to ZooKeeper ensemble=127.0.0.1:58025 2024-11-13T18:33:33,573 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:33:33,575 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:33:33,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:354330x0, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:33:33,580 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:354330x0, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:33:33,580 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35433-0x100ed6052dc0001 connected 2024-11-13T18:33:33,581 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T18:33:33,581 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T18:33:33,582 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T18:33:33,583 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:33:33,583 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35433 2024-11-13T18:33:33,583 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35433 2024-11-13T18:33:33,584 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35433 2024-11-13T18:33:33,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35433 2024-11-13T18:33:33,585 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35433 2024-11-13T18:33:33,603 
DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39e84130bbc9:40397 2024-11-13T18:33:33,603 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39e84130bbc9,40397,1731522813506 2024-11-13T18:33:33,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:33:33,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:33:33,605 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39e84130bbc9,40397,1731522813506 2024-11-13T18:33:33,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T18:33:33,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,608 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T18:33:33,608 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39e84130bbc9,40397,1731522813506 from backup master directory 2024-11-13T18:33:33,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39e84130bbc9,40397,1731522813506 2024-11-13T18:33:33,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:33:33,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:33:33,610 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
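Several ZKUtil/ZKWatcher entries above record "Set watcher on znode that does not yet exist" for paths such as /hbase/running, /hbase/master and /hbase/acl, followed by NodeCreated and NodeChildrenChanged events once the active master registers itself. This works because ZooKeeper's exists() call registers a watch whether or not the node is currently present. A minimal sketch of that pattern with the stock ZooKeeper client (not HBase's internal ZKWatcher/ZKUtil wrappers; the connect string and paths below are illustrative):

    // Sketch of watching a znode that may not exist yet, the pattern the
    // ZKUtil(113) lines describe. Uses the plain ZooKeeper client API.
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58025", 30000,
            event -> { /* connection-state events */ });

        Watcher masterWatch = (WatchedEvent event) ->
            System.out.println("Event " + event.getType() + " on " + event.getPath());

        // exists() registers the watch even when the node is absent; the watcher
        // then fires with NodeCreated once /hbase/master is written by the active master.
        if (zk.exists("/hbase/master", masterWatch) == null) {
          System.out.println("Set watcher on znode that does not yet exist, /hbase/master");
        }
      }
    }

The NodeCreated event on /hbase/master seen above is the region server's watcher firing in exactly this way when the master finishes registration.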
2024-11-13T18:33:33,610 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39e84130bbc9,40397,1731522813506 2024-11-13T18:33:33,614 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/hbase.id] with ID: 6d54a561-e343-431a-ba3a-711e0e7b55f8 2024-11-13T18:33:33,614 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/.tmp/hbase.id 2024-11-13T18:33:33,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:33:33,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:33:33,625 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/.tmp/hbase.id]:[hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/hbase.id] 2024-11-13T18:33:33,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:33,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:33,637 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:33:33,637 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T18:33:33,638 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
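The FSUtils entries above describe creating the cluster ID file by first writing it to a temporary location (.tmp/hbase.id) and then moving it to its final path, so readers never observe a partially written file. A hedged sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the root path is a placeholder, the ID string is the one logged above, and this is not the actual org.apache.hadoop.hbase.util.FSUtils implementation, just the same idea:

    // Sketch of the write-to-temp-then-rename pattern the FSUtils(620/625/634)
    // lines describe for hbase.id. Paths are placeholders.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/example-root");  // placeholder
        Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
        Path finalId = new Path(rootDir, "hbase.id");

        // 1. Write the cluster ID to a temporary file first.
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
          out.write("6d54a561-e343-431a-ba3a-711e0e7b55f8".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename into place so readers only ever see a complete file.
        if (!fs.rename(tmpId, finalId)) {
          throw new java.io.IOException("Could not move " + tmpId + " to " + finalId);
        }
      }
    }

The interleaved RecoverLeaseFSUtils warnings above are unrelated to this step: they come from a Close-WAL-Writer thread of the previous mini cluster whose DFSClient has already been closed, hence the "Filesystem closed" cause.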
2024-11-13T18:33:33,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:33:33,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:33:33,652 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:33:33,652 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T18:33:33,653 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:33:33,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:33:33,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:33:33,664 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store 2024-11-13T18:33:33,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:33:33,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:33:33,673 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:33:33,673 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:33:33,673 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:33:33,673 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:33:33,673 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:33:33,673 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:33:33,673 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
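[editor's note] The MasterRegion entries above print the full 'master:store' descriptor (families info, proc, rs, and state). As an illustration only, the same shape of descriptor could be declared through the public hbase-client builder API as sketched below; this is an assumption-laden sketch, not the code MasterRegion itself executes, and the proc/rs/state families simply rely on the client defaults, which match the values shown in the log (1 version, ROW bloom filter, no encoding, 64 KB blocks).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
                // 'info' family as printed above: 3 versions, in-memory, ROWCOL bloom,
                // ROW_INDEX_V1 encoding, 8 KB block size
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setInMemory(true)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBlocksize(8 * 1024)
                    .build())
                // 'proc', 'rs', 'state' use the defaults shown in the log output
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc")).build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs")).build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state")).build())
                .build();
        }
    }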
2024-11-13T18:33:33,673 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522813673Disabling compacts and flushes for region at 1731522813673Disabling writes for close at 1731522813673Writing region close event to WAL at 1731522813673Closed at 1731522813673 2024-11-13T18:33:33,674 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/.initializing 2024-11-13T18:33:33,674 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/WALs/39e84130bbc9,40397,1731522813506 2024-11-13T18:33:33,676 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C40397%2C1731522813506, suffix=, logDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/WALs/39e84130bbc9,40397,1731522813506, archiveDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/oldWALs, maxLogs=10 2024-11-13T18:33:33,677 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C40397%2C1731522813506.1731522813677 2024-11-13T18:33:33,681 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/WALs/39e84130bbc9,40397,1731522813506/39e84130bbc9%2C40397%2C1731522813506.1731522813677 2024-11-13T18:33:33,686 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34759:34759),(127.0.0.1/127.0.0.1:38703:38703)] 2024-11-13T18:33:33,689 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:33:33,689 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:33:33,689 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,689 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,692 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T18:33:33,693 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:33,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:33:33,694 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,695 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T18:33:33,695 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:33,695 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:33:33,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,697 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T18:33:33,697 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:33,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:33:33,698 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,700 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T18:33:33,700 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:33,700 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:33:33,700 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,701 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,702 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,704 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,704 DEBUG [master/39e84130bbc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,705 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T18:33:33,707 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:33:33,710 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:33:33,710 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818311, jitterRate=0.0405362993478775}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T18:33:33,711 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731522813690Initializing all the Stores at 1731522813690Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522813690Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522813691 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522813691Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522813691Cleaning up temporary data from old regions at 1731522813704 (+13 ms)Region opened successfully at 1731522813711 (+7 ms) 2024-11-13T18:33:33,711 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T18:33:33,714 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bfad061, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:33:33,716 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T18:33:33,716 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T18:33:33,716 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T18:33:33,716 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T18:33:33,717 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T18:33:33,717 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T18:33:33,717 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T18:33:33,719 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T18:33:33,720 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T18:33:33,722 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T18:33:33,722 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T18:33:33,724 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T18:33:33,726 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T18:33:33,726 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T18:33:33,727 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T18:33:33,729 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T18:33:33,730 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T18:33:33,731 DEBUG 
[master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T18:33:33,733 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T18:33:33,734 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T18:33:33,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:33:33,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:33:33,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,736 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39e84130bbc9,40397,1731522813506, sessionid=0x100ed6052dc0000, setting cluster-up flag (Was=false) 2024-11-13T18:33:33,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,745 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T18:33:33,746 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,40397,1731522813506 2024-11-13T18:33:33,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:33,754 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T18:33:33,755 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,40397,1731522813506 2024-11-13T18:33:33,756 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T18:33:33,758 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T18:33:33,758 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T18:33:33,758 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T18:33:33,759 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39e84130bbc9,40397,1731522813506 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T18:33:33,761 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:33:33,761 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:33:33,761 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:33:33,761 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:33:33,761 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39e84130bbc9:0, corePoolSize=10, maxPoolSize=10 2024-11-13T18:33:33,761 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,761 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:33:33,761 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T18:33:33,763 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:33:33,763 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T18:33:33,764 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:33,764 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T18:33:33,765 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731522843765 2024-11-13T18:33:33,765 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T18:33:33,765 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T18:33:33,765 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T18:33:33,766 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T18:33:33,766 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T18:33:33,766 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T18:33:33,769 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,770 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T18:33:33,770 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T18:33:33,770 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T18:33:33,770 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T18:33:33,770 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T18:33:33,770 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522813770,5,FailOnTimeoutGroup] 2024-11-13T18:33:33,771 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522813771,5,FailOnTimeoutGroup] 2024-11-13T18:33:33,771 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,771 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T18:33:33,771 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,771 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
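[editor's note] The HMaster(1741) message above states that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch of enabling it programmatically follows; the threshold of 3 is an arbitrary example value, not a recommendation from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountConfigSketch {
        public static Configuration enableRefCountRecovery() {
            Configuration conf = HBaseConfiguration.create();
            // Any value > 0 enables the feature according to the message above; 3 is illustrative.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
            return conf;
        }
    }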
2024-11-13T18:33:33,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:33:33,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:33:33,775 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T18:33:33,775 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0 2024-11-13T18:33:33,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:33:33,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:33:33,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:33:33,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:33:33,784 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:33:33,784 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:33,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:33:33,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:33:33,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:33:33,786 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:33,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:33:33,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:33:33,787 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(746): ClusterId : 6d54a561-e343-431a-ba3a-711e0e7b55f8 2024-11-13T18:33:33,787 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T18:33:33,788 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:33:33,788 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:33,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:33:33,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:33:33,789 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T18:33:33,789 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T18:33:33,789 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:33:33,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:33,790 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:33:33,790 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:33:33,791 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740 2024-11-13T18:33:33,791 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740 2024-11-13T18:33:33,792 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T18:33:33,792 DEBUG [RS:0;39e84130bbc9:35433 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@287610c0, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:33:33,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:33:33,793 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:33:33,793 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:33:33,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:33:33,798 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:33:33,798 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711681, jitterRate=-0.09505192935466766}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:33:33,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731522813782Initializing all the Stores at 1731522813783 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522813783Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522813783Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522813783Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522813783Cleaning up temporary data from old regions at 1731522813793 (+10 ms)Region opened successfully at 1731522813799 (+6 ms) 2024-11-13T18:33:33,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:33:33,799 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:33:33,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 
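[editor's note] The FlushLargeStoresPolicy(65) entries above fall back to memstore-flush-size divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A hedged sketch of setting that table-level value explicitly via TableDescriptorBuilder is shown below; the table name and the 16 MB bound are illustrative assumptions, not values taken from this test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerColumnFamilyFlushSketch {
        // Returns a copy of the given descriptor with an explicit per-family flush lower bound.
        public static TableDescriptor withExplicitLowerBound(TableDescriptor existing) {
            return TableDescriptorBuilder.newBuilder(existing)
                .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                    String.valueOf(16L * 1024 * 1024)) // 16 MB, arbitrary example
                .build();
        }

        public static void main(String[] args) {
            // Purely illustrative starting point: an empty descriptor for a hypothetical table.
            TableDescriptor base =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("default", "example")).build();
            System.out.println(withExplicitLowerBound(base)
                .getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }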
2024-11-13T18:33:33,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:33:33,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:33:33,800 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:33:33,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522813799Disabling compacts and flushes for region at 1731522813799Disabling writes for close at 1731522813799Writing region close event to WAL at 1731522813800 (+1 ms)Closed at 1731522813800 2024-11-13T18:33:33,801 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:33:33,801 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T18:33:33,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T18:33:33,804 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:33:33,805 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T18:33:33,808 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39e84130bbc9:35433 2024-11-13T18:33:33,808 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T18:33:33,808 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T18:33:33,808 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T18:33:33,808 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(2659): reportForDuty to master=39e84130bbc9,40397,1731522813506 with port=35433, startcode=1731522813569 2024-11-13T18:33:33,809 DEBUG [RS:0;39e84130bbc9:35433 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T18:33:33,811 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40353, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T18:33:33,811 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40397 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39e84130bbc9,35433,1731522813569 2024-11-13T18:33:33,812 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40397 {}] master.ServerManager(517): Registering regionserver=39e84130bbc9,35433,1731522813569 2024-11-13T18:33:33,813 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0 2024-11-13T18:33:33,813 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43749 2024-11-13T18:33:33,813 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T18:33:33,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:33:33,823 DEBUG [RS:0;39e84130bbc9:35433 {}] zookeeper.ZKUtil(111): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39e84130bbc9,35433,1731522813569 2024-11-13T18:33:33,823 WARN [RS:0;39e84130bbc9:35433 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:33:33,823 INFO [RS:0;39e84130bbc9:35433 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:33:33,823 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569 2024-11-13T18:33:33,826 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39e84130bbc9,35433,1731522813569] 2024-11-13T18:33:33,832 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T18:33:33,839 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T18:33:33,840 INFO [RS:0;39e84130bbc9:35433 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T18:33:33,840 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T18:33:33,845 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T18:33:33,847 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T18:33:33,847 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,847 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,848 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,848 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:33:33,848 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:33:33,848 DEBUG [RS:0;39e84130bbc9:35433 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:33:33,853 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T18:33:33,853 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,853 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,853 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,853 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,853 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,35433,1731522813569-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:33:33,880 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T18:33:33,880 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,35433,1731522813569-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,880 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,880 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.Replication(171): 39e84130bbc9,35433,1731522813569 started 2024-11-13T18:33:33,900 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:33,900 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1482): Serving as 39e84130bbc9,35433,1731522813569, RpcServer on 39e84130bbc9/172.17.0.3:35433, sessionid=0x100ed6052dc0001 2024-11-13T18:33:33,900 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T18:33:33,900 DEBUG [RS:0;39e84130bbc9:35433 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39e84130bbc9,35433,1731522813569 2024-11-13T18:33:33,900 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,35433,1731522813569' 2024-11-13T18:33:33,900 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T18:33:33,901 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T18:33:33,902 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T18:33:33,902 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T18:33:33,902 DEBUG [RS:0;39e84130bbc9:35433 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39e84130bbc9,35433,1731522813569 2024-11-13T18:33:33,902 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,35433,1731522813569' 2024-11-13T18:33:33,902 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T18:33:33,902 DEBUG 
[RS:0;39e84130bbc9:35433 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T18:33:33,903 DEBUG [RS:0;39e84130bbc9:35433 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T18:33:33,903 INFO [RS:0;39e84130bbc9:35433 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T18:33:33,903 INFO [RS:0;39e84130bbc9:35433 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T18:33:33,956 WARN [39e84130bbc9:40397 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T18:33:34,005 INFO [RS:0;39e84130bbc9:35433 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C35433%2C1731522813569, suffix=, logDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569, archiveDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/oldWALs, maxLogs=32 2024-11-13T18:33:34,006 INFO [RS:0;39e84130bbc9:35433 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C35433%2C1731522813569.1731522814006 2024-11-13T18:33:34,025 INFO [RS:0;39e84130bbc9:35433 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522814006 2024-11-13T18:33:34,027 DEBUG [RS:0;39e84130bbc9:35433 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34759:34759),(127.0.0.1/127.0.0.1:38703:38703)] 2024-11-13T18:33:34,206 DEBUG [39e84130bbc9:40397 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T18:33:34,206 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39e84130bbc9,35433,1731522813569 2024-11-13T18:33:34,208 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,35433,1731522813569, state=OPENING 2024-11-13T18:33:34,209 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T18:33:34,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:34,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:33:34,211 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:33:34,212 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,35433,1731522813569}] 2024-11-13T18:33:34,213 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-13T18:33:34,213 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:33:34,365 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T18:33:34,367 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55225, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T18:33:34,371 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T18:33:34,371 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:33:34,373 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C35433%2C1731522813569.meta, suffix=.meta, logDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569, archiveDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/oldWALs, maxLogs=32 2024-11-13T18:33:34,373 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C35433%2C1731522813569.meta.1731522814373.meta 2024-11-13T18:33:34,386 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.meta.1731522814373.meta 2024-11-13T18:33:34,394 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38703:38703),(127.0.0.1/127.0.0.1:34759:34759)] 2024-11-13T18:33:34,399 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:33:34,399 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T18:33:34,399 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T18:33:34,399 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T18:33:34,399 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T18:33:34,399 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:33:34,400 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T18:33:34,400 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T18:33:34,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:33:34,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:33:34,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:34,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:33:34,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:33:34,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:33:34,404 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:34,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:33:34,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:33:34,406 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:33:34,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:34,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:33:34,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:33:34,407 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:33:34,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:34,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T18:33:34,408 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:33:34,408 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740 2024-11-13T18:33:34,410 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740 2024-11-13T18:33:34,411 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:33:34,411 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:33:34,412 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:33:34,413 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:33:34,414 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872382, jitterRate=0.10929213464260101}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:33:34,414 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T18:33:34,415 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731522814400Writing region info on filesystem at 1731522814400Initializing all the Stores at 1731522814401 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522814401Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522814401Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522814401Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522814401Cleaning up temporary data from old regions at 1731522814411 (+10 ms)Running coprocessor post-open hooks at 1731522814414 (+3 ms)Region opened successfully at 1731522814415 (+1 ms) 2024-11-13T18:33:34,416 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731522814365 2024-11-13T18:33:34,419 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T18:33:34,419 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T18:33:34,420 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,35433,1731522813569 2024-11-13T18:33:34,422 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,35433,1731522813569, state=OPEN 2024-11-13T18:33:34,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:33:34,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:33:34,428 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39e84130bbc9,35433,1731522813569 2024-11-13T18:33:34,428 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:33:34,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:33:34,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T18:33:34,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,35433,1731522813569 in 216 msec 2024-11-13T18:33:34,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T18:33:34,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 631 msec 2024-11-13T18:33:34,436 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:33:34,436 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T18:33:34,439 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:33:34,440 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,35433,1731522813569, seqNum=-1] 2024-11-13T18:33:34,440 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:33:34,442 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47539, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:33:34,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 689 msec 2024-11-13T18:33:34,449 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731522814449, completionTime=-1 2024-11-13T18:33:34,449 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T18:33:34,449 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T18:33:34,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T18:33:34,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731522874451 2024-11-13T18:33:34,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731522934451 2024-11-13T18:33:34,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T18:33:34,452 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40397,1731522813506-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:34,452 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40397,1731522813506-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:34,452 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40397,1731522813506-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:34,452 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39e84130bbc9:40397, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T18:33:34,452 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:34,454 DEBUG [master/39e84130bbc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T18:33:34,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T18:33:34,461 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.851sec 2024-11-13T18:33:34,461 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T18:33:34,461 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T18:33:34,462 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T18:33:34,462 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T18:33:34,462 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T18:33:34,462 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40397,1731522813506-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:33:34,462 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40397,1731522813506-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T18:33:34,473 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T18:33:34,473 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T18:33:34,473 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,40397,1731522813506-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T18:33:34,489 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c7d522f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:33:34,490 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39e84130bbc9,40397,-1 for getting cluster id 2024-11-13T18:33:34,490 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T18:33:34,492 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6d54a561-e343-431a-ba3a-711e0e7b55f8' 2024-11-13T18:33:34,493 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T18:33:34,493 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6d54a561-e343-431a-ba3a-711e0e7b55f8" 2024-11-13T18:33:34,493 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72369378, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:33:34,494 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39e84130bbc9,40397,-1] 2024-11-13T18:33:34,494 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T18:33:34,494 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:33:34,496 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50986, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T18:33:34,497 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b173219, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:33:34,498 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:33:34,499 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,35433,1731522813569, seqNum=-1] 2024-11-13T18:33:34,500 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:33:34,501 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49636, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:33:34,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39e84130bbc9,40397,1731522813506 2024-11-13T18:33:34,504 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:33:34,508 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T18:33:34,509 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T18:33:34,511 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 39e84130bbc9,40397,1731522813506 2024-11-13T18:33:34,511 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@16ee86be 2024-11-13T18:33:34,511 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T18:33:34,512 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50988, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T18:33:34,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T18:33:34,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-13T18:33:34,513 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:33:34,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:33:34,517 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T18:33:34,517 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:34,517 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-13T18:33:34,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T18:33:34,519 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T18:33:34,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741835_1011 (size=405) 2024-11-13T18:33:34,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741835_1011 (size=405) 2024-11-13T18:33:34,537 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2cdf957bd75f8c509e5330db595fd178, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0 2024-11-13T18:33:34,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741836_1012 (size=88) 2024-11-13T18:33:34,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741836_1012 (size=88) 2024-11-13T18:33:34,552 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:33:34,552 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 2cdf957bd75f8c509e5330db595fd178, disabling compactions & flushes 2024-11-13T18:33:34,552 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:33:34,552 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:33:34,552 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. after waiting 0 ms 2024-11-13T18:33:34,552 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 
2024-11-13T18:33:34,552 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:33:34,552 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2cdf957bd75f8c509e5330db595fd178: Waiting for close lock at 1731522814552Disabling compacts and flushes for region at 1731522814552Disabling writes for close at 1731522814552Writing region close event to WAL at 1731522814552Closed at 1731522814552 2024-11-13T18:33:34,554 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T18:33:34,554 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731522814554"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731522814554"}]},"ts":"1731522814554"} 2024-11-13T18:33:34,557 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T18:33:34,559 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T18:33:34,559 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522814559"}]},"ts":"1731522814559"} 2024-11-13T18:33:34,562 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-13T18:33:34,562 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2cdf957bd75f8c509e5330db595fd178, ASSIGN}] 2024-11-13T18:33:34,564 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2cdf957bd75f8c509e5330db595fd178, ASSIGN 2024-11-13T18:33:34,565 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2cdf957bd75f8c509e5330db595fd178, ASSIGN; state=OFFLINE, location=39e84130bbc9,35433,1731522813569; forceNewPlan=false, retain=false 2024-11-13T18:33:34,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: 
null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:34,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:34,715 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2cdf957bd75f8c509e5330db595fd178, regionState=OPENING, regionLocation=39e84130bbc9,35433,1731522813569 2024-11-13T18:33:34,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2cdf957bd75f8c509e5330db595fd178, ASSIGN because future has completed 2024-11-13T18:33:34,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2cdf957bd75f8c509e5330db595fd178, server=39e84130bbc9,35433,1731522813569}] 2024-11-13T18:33:34,884 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 
2024-11-13T18:33:34,884 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2cdf957bd75f8c509e5330db595fd178, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:33:34,885 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,885 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:33:34,885 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,885 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,886 INFO [StoreOpener-2cdf957bd75f8c509e5330db595fd178-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,888 INFO [StoreOpener-2cdf957bd75f8c509e5330db595fd178-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2cdf957bd75f8c509e5330db595fd178 columnFamilyName info 2024-11-13T18:33:34,888 DEBUG [StoreOpener-2cdf957bd75f8c509e5330db595fd178-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:33:34,889 INFO [StoreOpener-2cdf957bd75f8c509e5330db595fd178-1 {}] regionserver.HStore(327): Store=2cdf957bd75f8c509e5330db595fd178/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:33:34,889 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,890 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,890 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,891 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,891 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,892 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,895 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:33:34,896 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2cdf957bd75f8c509e5330db595fd178; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713479, jitterRate=-0.09276469051837921}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T18:33:34,896 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:33:34,897 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2cdf957bd75f8c509e5330db595fd178: Running coprocessor pre-open hook at 1731522814885Writing region info on filesystem at 1731522814885Initializing all the Stores at 1731522814886 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522814886Cleaning up temporary data from old regions at 1731522814891 (+5 ms)Running coprocessor post-open hooks at 1731522814896 (+5 ms)Region opened successfully at 1731522814897 (+1 ms) 2024-11-13T18:33:34,902 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178., pid=6, masterSystemTime=1731522814876 2024-11-13T18:33:34,905 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:33:34,905 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:33:34,906 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2cdf957bd75f8c509e5330db595fd178, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,35433,1731522813569 2024-11-13T18:33:34,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2cdf957bd75f8c509e5330db595fd178, server=39e84130bbc9,35433,1731522813569 because future has completed 2024-11-13T18:33:34,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T18:33:34,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2cdf957bd75f8c509e5330db595fd178, server=39e84130bbc9,35433,1731522813569 in 191 msec 2024-11-13T18:33:34,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T18:33:34,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=2cdf957bd75f8c509e5330db595fd178, ASSIGN in 351 msec 2024-11-13T18:33:34,918 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T18:33:34,918 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522814918"}]},"ts":"1731522814918"} 2024-11-13T18:33:34,920 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-13T18:33:34,922 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T18:33:34,924 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 409 msec 2024-11-13T18:33:35,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:35,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:36,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:36,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:37,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:37,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:38,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:38,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:39,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:39,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:39,901 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T18:33:39,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,934 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:33:39,942 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T18:33:39,942 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T18:33:39,943 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:33:39,943 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T18:33:39,943 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T18:33:39,943 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T18:33:39,943 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:33:39,943 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T18:33:39,944 DEBUG 
[HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T18:33:39,945 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-13T18:33:40,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:40,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:41,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:41,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:42,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:42,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:43,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:43,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:44,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T18:33:44,573 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T18:33:44,573 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-13T18:33:44,577 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:33:44,577 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 
2024-11-13T18:33:44,580 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178., hostname=39e84130bbc9,35433,1731522813569, seqNum=2] 2024-11-13T18:33:44,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:33:44,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:33:44,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T18:33:44,596 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-13T18:33:44,597 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-13T18:33:44,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-13T18:33:44,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:44,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:33:44,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35433 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-13T18:33:44,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:33:44,766 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 2cdf957bd75f8c509e5330db595fd178 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T18:33:44,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/e1988e1928ee4ec89ff2efd20f843ca0 is 1080, key is row0001/info:/1731522824581/Put/seqid=0 2024-11-13T18:33:44,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741837_1013 (size=6033) 2024-11-13T18:33:44,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741837_1013 (size=6033) 2024-11-13T18:33:44,796 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/e1988e1928ee4ec89ff2efd20f843ca0 2024-11-13T18:33:44,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/e1988e1928ee4ec89ff2efd20f843ca0 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/e1988e1928ee4ec89ff2efd20f843ca0 2024-11-13T18:33:44,810 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/e1988e1928ee4ec89ff2efd20f843ca0, entries=1, sequenceid=5, filesize=5.9 K 2024-11-13T18:33:44,811 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2cdf957bd75f8c509e5330db595fd178 in 46ms, sequenceid=5, compaction requested=false 2024-11-13T18:33:44,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush 
status journal for 2cdf957bd75f8c509e5330db595fd178: 2024-11-13T18:33:44,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:33:44,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-13T18:33:44,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-13T18:33:44,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-13T18:33:44,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 219 msec 2024-11-13T18:33:44,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 232 msec 2024-11-13T18:33:45,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:45,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:46,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:46,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:47,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:47,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:48,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:48,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:49,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:49,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:50,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:50,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:51,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:51,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:52,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:52,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:53,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:53,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:54,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T18:33:54,624 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T18:33:54,627 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:33:54,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:33:54,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-13T18:33:54,630 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-13T18:33:54,631 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-13T18:33:54,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, 
hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-13T18:33:54,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:54,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:54,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35433 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-13T18:33:54,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:33:54,786 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 2cdf957bd75f8c509e5330db595fd178 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T18:33:54,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/9430b5ed5f234d9099b91d711c2005b4 is 1080, key is row0002/info:/1731522834625/Put/seqid=0 2024-11-13T18:33:54,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741838_1014 (size=6033) 2024-11-13T18:33:54,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741838_1014 (size=6033) 2024-11-13T18:33:54,798 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/9430b5ed5f234d9099b91d711c2005b4 2024-11-13T18:33:54,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/9430b5ed5f234d9099b91d711c2005b4 as 
hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/9430b5ed5f234d9099b91d711c2005b4 2024-11-13T18:33:54,810 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/9430b5ed5f234d9099b91d711c2005b4, entries=1, sequenceid=9, filesize=5.9 K 2024-11-13T18:33:54,811 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2cdf957bd75f8c509e5330db595fd178 in 25ms, sequenceid=9, compaction requested=false 2024-11-13T18:33:54,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 2cdf957bd75f8c509e5330db595fd178: 2024-11-13T18:33:54,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:33:54,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-13T18:33:54,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-13T18:33:54,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-13T18:33:54,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 182 msec 2024-11-13T18:33:54,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec 2024-11-13T18:33:55,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:55,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:56,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:56,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:57,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:57,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:57,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 after 68055ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:57,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T18:33:58,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:58,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:59,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:33:59,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:00,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:00,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:01,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:01,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:02,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:02,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:03,487 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T18:34:03,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:03,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:04,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:04,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:04,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-13T18:34:04,713 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T18:34:04,716 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C35433%2C1731522813569.1731522844716 2024-11-13T18:34:04,721 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:04,721 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:04,721 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:04,721 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:04,721 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:04,722 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522814006 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522844716 2024-11-13T18:34:04,722 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34759:34759),(127.0.0.1/127.0.0.1:38703:38703)] 2024-11-13T18:34:04,722 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522814006 is not closed yet, will 
try archiving it next time 2024-11-13T18:34:04,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:34:04,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741833_1009 (size=5546) 2024-11-13T18:34:04,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741833_1009 (size=5546) 2024-11-13T18:34:04,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:34:04,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-13T18:34:04,726 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-13T18:34:04,727 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-13T18:34:04,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-13T18:34:04,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35433 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-13T18:34:04,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 
2024-11-13T18:34:04,881 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 2cdf957bd75f8c509e5330db595fd178 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T18:34:04,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/034ae1c05f724a0a9414b91beed654e3 is 1080, key is row0003/info:/1731522844714/Put/seqid=0 2024-11-13T18:34:04,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741840_1016 (size=6033) 2024-11-13T18:34:04,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741840_1016 (size=6033) 2024-11-13T18:34:04,893 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/034ae1c05f724a0a9414b91beed654e3 2024-11-13T18:34:04,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/034ae1c05f724a0a9414b91beed654e3 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/034ae1c05f724a0a9414b91beed654e3 2024-11-13T18:34:04,910 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/034ae1c05f724a0a9414b91beed654e3, entries=1, sequenceid=13, filesize=5.9 K 2024-11-13T18:34:04,911 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2cdf957bd75f8c509e5330db595fd178 in 30ms, sequenceid=13, compaction requested=true 2024-11-13T18:34:04,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 2cdf957bd75f8c509e5330db595fd178: 2024-11-13T18:34:04,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 
2024-11-13T18:34:04,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-13T18:34:04,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-13T18:34:04,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-13T18:34:04,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 187 msec
2024-11-13T18:34:04,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec
2024-11-13T18:34:05,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-13T18:34:05,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:06,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:06,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:34:07,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:07,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:08,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:34:08,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:09,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:09,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:34:10,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:10,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:11,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:34:11,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:12,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:12,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:34:13,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:13,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:14,502 INFO [master/39e84130bbc9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T18:34:14,502 INFO [master/39e84130bbc9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T18:34:14,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:14,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T18:34:14,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-13T18:34:14,813 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-13T18:34:14,813 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-13T18:34:14,814 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-13T18:34:14,814 DEBUG [Time-limited test {}] regionserver.HStore(1541): 2cdf957bd75f8c509e5330db595fd178/info is initiating minor compaction (all files)
2024-11-13T18:34:14,815 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-13T18:34:14,815 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-13T18:34:14,815 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 2cdf957bd75f8c509e5330db595fd178/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.
2024-11-13T18:34:14,815 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/e1988e1928ee4ec89ff2efd20f843ca0, hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/9430b5ed5f234d9099b91d711c2005b4, hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/034ae1c05f724a0a9414b91beed654e3] into tmpdir=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp, totalSize=17.7 K
2024-11-13T18:34:14,815 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e1988e1928ee4ec89ff2efd20f843ca0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731522824581
2024-11-13T18:34:14,816 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9430b5ed5f234d9099b91d711c2005b4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731522834625
2024-11-13T18:34:14,816 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 034ae1c05f724a0a9414b91beed654e3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731522844714
2024-11-13T18:34:14,830 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 2cdf957bd75f8c509e5330db595fd178#info#compaction#46 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-13T18:34:14,831 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/1a62d7686b6547ff822189e3cdff1458 is 1080, key is row0001/info:/1731522824581/Put/seqid=0
2024-11-13T18:34:14,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741841_1017 (size=8296)
2024-11-13T18:34:14,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741841_1017 (size=8296)
2024-11-13T18:34:14,842 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/1a62d7686b6547ff822189e3cdff1458 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/1a62d7686b6547ff822189e3cdff1458
2024-11-13T18:34:14,848 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2cdf957bd75f8c509e5330db595fd178/info of 2cdf957bd75f8c509e5330db595fd178 into 1a62d7686b6547ff822189e3cdff1458(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-13T18:34:14,848 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 2cdf957bd75f8c509e5330db595fd178:
2024-11-13T18:34:14,851 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C35433%2C1731522813569.1731522854851
2024-11-13T18:34:14,857 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T18:34:14,857 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T18:34:14,857 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T18:34:14,857 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T18:34:14,857 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T18:34:14,857 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522844716 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522854851
2024-11-13T18:34:14,858 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34759:34759),(127.0.0.1/127.0.0.1:38703:38703)]
2024-11-13T18:34:14,858 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522844716 is not closed yet, will try archiving it next time
2024-11-13T18:34:14,858 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522814006 to hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/oldWALs/39e84130bbc9%2C35433%2C1731522813569.1731522814006
2024-11-13T18:34:14,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741839_1015 (size=2520)
2024-11-13T18:34:14,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741839_1015 (size=2520)
2024-11-13T18:34:14,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T18:34:14,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T18:34:14,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-13T18:34:14,862 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-13T18:34:14,863 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-13T18:34:14,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-13T18:34:15,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35433 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-13T18:34:15,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.
2024-11-13T18:34:15,016 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 2cdf957bd75f8c509e5330db595fd178 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-13T18:34:15,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/6b13c7ba2d024d93a1b901910cc653c6 is 1080, key is row0000/info:/1731522854850/Put/seqid=0
2024-11-13T18:34:15,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741843_1019 (size=6033)
2024-11-13T18:34:15,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741843_1019 (size=6033)
2024-11-13T18:34:15,027 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/6b13c7ba2d024d93a1b901910cc653c6
2024-11-13T18:34:15,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/6b13c7ba2d024d93a1b901910cc653c6 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/6b13c7ba2d024d93a1b901910cc653c6
2024-11-13T18:34:15,037 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/6b13c7ba2d024d93a1b901910cc653c6, entries=1, sequenceid=18, filesize=5.9 K
2024-11-13T18:34:15,038 INFO [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2cdf957bd75f8c509e5330db595fd178 in 22ms, sequenceid=18, compaction requested=false
2024-11-13T18:34:15,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 2cdf957bd75f8c509e5330db595fd178:
2024-11-13T18:34:15,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.
2024-11-13T18:34:15,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-13T18:34:15,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-13T18:34:15,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-13T18:34:15,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-13T18:34:15,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-13T18:34:15,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
[The same WARN from util.RecoverLeaseFSUtils(258), a java.lang.reflect.InvocationTargetException caused by java.io.IOException: Filesystem closed, recurs with an identical stack trace about once per second per file from 2024-11-13T18:34:15,666 through 2024-11-13T18:34:24,672, alternating between the two WALs under hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/: 39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta and 39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994. The duplicate traces are omitted; the only other record in that interval, followed by the final occurrence, is:]
2024-11-13T18:34:19,885 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2cdf957bd75f8c509e5330db595fd178, had cached 0 bytes from a total of 14329
2024-11-13T18:34:24,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:34:24,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40397 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-13T18:34:24,933 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T18:34:24,937 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C35433%2C1731522813569.1731522864937 2024-11-13T18:34:24,947 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:24,947 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:24,948 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:24,948 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:24,948 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:24,948 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522854851 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522864937 2024-11-13T18:34:24,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741842_1018 (size=2026) 2024-11-13T18:34:24,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741842_1018 (size=2026) 2024-11-13T18:34:24,951 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/WALs/39e84130bbc9,35433,1731522813569/39e84130bbc9%2C35433%2C1731522813569.1731522844716 to hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/oldWALs/39e84130bbc9%2C35433%2C1731522813569.1731522844716 2024-11-13T18:34:24,953 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34759:34759),(127.0.0.1/127.0.0.1:38703:38703)] 2024-11-13T18:34:24,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T18:34:24,954 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T18:34:24,954 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:34:24,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:34:24,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:34:24,954 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
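[Note: the repeated util.RecoverLeaseFSUtils WARNs earlier in this section come from a Close-WAL-Writer thread still trying to recover the HDFS lease on WALs of an earlier mini cluster (hdfs://localhost:34359) whose DFS client has already been shut down, hence "Caused by: java.io.IOException: Filesystem closed" and the once-per-second retries. A minimal sketch of that recover-then-poll pattern against the plain HDFS client API is shown below; it is an illustration under assumed timings and retry counts, not the actual RecoverLeaseFSUtils implementation:]

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  private LeaseRecoverySketch() {}

  // Simplified recover-then-poll loop; RecoverLeaseFSUtils does something similar
  // with backoff and timeouts, but this is not the HBase implementation itself.
  public static boolean recoverLease(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    if (dfs.recoverLease(wal)) {      // ask the NameNode to start lease recovery
      return true;                    // file already closed, nothing more to do
    }
    for (int attempt = 0; attempt < 60; attempt++) {
      Thread.sleep(1000L);            // retry about once per second, as in the WARNs above
      if (dfs.isFileClosed(wal)) {    // this is the call that fails with
        return true;                  // "java.io.IOException: Filesystem closed"
      }                               // once the underlying DFSClient has been shut down
    }
    return false;
  }
}

[A caller would obtain the DistributedFileSystem from the WAL path (for example via Path.getFileSystem(conf)) and invoke recoverLease(dfs, walPath) before reopening the file for reading.]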
2024-11-13T18:34:24,954 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T18:34:24,954 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1156108172, stopped=false 2024-11-13T18:34:24,954 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39e84130bbc9,40397,1731522813506 2024-11-13T18:34:24,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:34:24,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:34:24,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:24,956 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:34:24,956 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T18:34:24,957 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:34:24,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:24,957 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:34:24,957 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39e84130bbc9,35433,1731522813569' ***** 2024-11-13T18:34:24,957 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T18:34:24,957 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:34:24,957 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:34:24,957 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T18:34:24,957 INFO [RS:0;39e84130bbc9:35433 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T18:34:24,957 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T18:34:24,957 INFO [RS:0;39e84130bbc9:35433 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T18:34:24,957 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(3091): Received CLOSE for 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:34:24,958 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(959): stopping server 39e84130bbc9,35433,1731522813569 2024-11-13T18:34:24,958 INFO [RS:0;39e84130bbc9:35433 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:34:24,958 INFO [RS:0;39e84130bbc9:35433 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39e84130bbc9:35433. 
2024-11-13T18:34:24,958 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2cdf957bd75f8c509e5330db595fd178, disabling compactions & flushes 2024-11-13T18:34:24,958 DEBUG [RS:0;39e84130bbc9:35433 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:34:24,958 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:34:24,958 DEBUG [RS:0;39e84130bbc9:35433 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:34:24,958 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:34:24,958 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. after waiting 0 ms 2024-11-13T18:34:24,958 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:34:24,958 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T18:34:24,958 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T18:34:24,958 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T18:34:24,958 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 2cdf957bd75f8c509e5330db595fd178 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T18:34:24,958 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T18:34:24,959 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T18:34:24,959 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1325): Online Regions={2cdf957bd75f8c509e5330db595fd178=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T18:34:24,959 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2cdf957bd75f8c509e5330db595fd178 2024-11-13T18:34:24,959 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:34:24,959 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:34:24,959 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:34:24,959 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:34:24,959 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:34:24,959 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-13T18:34:24,967 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/f3bdbd899f874f15afec573d4bdc6a9c is 1080, key is row0001/info:/1731522864935/Put/seqid=0 2024-11-13T18:34:24,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741845_1021 (size=6033) 2024-11-13T18:34:24,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741845_1021 (size=6033) 2024-11-13T18:34:24,974 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/f3bdbd899f874f15afec573d4bdc6a9c 2024-11-13T18:34:24,981 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/.tmp/info/f3bdbd899f874f15afec573d4bdc6a9c as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/f3bdbd899f874f15afec573d4bdc6a9c 2024-11-13T18:34:24,984 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/.tmp/info/3305136e56cc4a28907b7b10f695e6f9 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178./info:regioninfo/1731522814906/Put/seqid=0 2024-11-13T18:34:24,991 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/f3bdbd899f874f15afec573d4bdc6a9c, entries=1, sequenceid=22, filesize=5.9 K 2024-11-13T18:34:24,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741846_1022 (size=7308) 2024-11-13T18:34:24,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741846_1022 (size=7308) 2024-11-13T18:34:24,995 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/.tmp/info/3305136e56cc4a28907b7b10f695e6f9 2024-11-13T18:34:24,996 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2cdf957bd75f8c509e5330db595fd178 in 38ms, sequenceid=22, compaction requested=true 2024-11-13T18:34:24,997 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/e1988e1928ee4ec89ff2efd20f843ca0, hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/9430b5ed5f234d9099b91d711c2005b4, hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/034ae1c05f724a0a9414b91beed654e3] to archive 2024-11-13T18:34:24,999 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-13T18:34:25,004 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/e1988e1928ee4ec89ff2efd20f843ca0 to hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/e1988e1928ee4ec89ff2efd20f843ca0 2024-11-13T18:34:25,006 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/9430b5ed5f234d9099b91d711c2005b4 to hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/9430b5ed5f234d9099b91d711c2005b4 2024-11-13T18:34:25,008 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/034ae1c05f724a0a9414b91beed654e3 to hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/info/034ae1c05f724a0a9414b91beed654e3 2024-11-13T18:34:25,008 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=39e84130bbc9:40397 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T18:34:25,009 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e1988e1928ee4ec89ff2efd20f843ca0=6033, 9430b5ed5f234d9099b91d711c2005b4=6033, 034ae1c05f724a0a9414b91beed654e3=6033] 2024-11-13T18:34:25,015 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/2cdf957bd75f8c509e5330db595fd178/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-13T18:34:25,015 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 2024-11-13T18:34:25,015 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2cdf957bd75f8c509e5330db595fd178: Waiting for close lock at 1731522864958Running coprocessor pre-close hooks at 1731522864958Disabling compacts and flushes for region at 1731522864958Disabling writes for close at 1731522864958Obtaining lock to block concurrent updates at 1731522864958Preparing flush snapshotting stores in 2cdf957bd75f8c509e5330db595fd178 at 1731522864958Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731522864959 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. at 1731522864959Flushing 2cdf957bd75f8c509e5330db595fd178/info: creating writer at 1731522864960 (+1 ms)Flushing 2cdf957bd75f8c509e5330db595fd178/info: appending metadata at 1731522864966 (+6 ms)Flushing 2cdf957bd75f8c509e5330db595fd178/info: closing flushed file at 1731522864966Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bd29833: reopening flushed file at 1731522864980 (+14 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 2cdf957bd75f8c509e5330db595fd178 in 38ms, sequenceid=22, compaction requested=true at 1731522864996 (+16 ms)Writing region close event to WAL at 1731522865009 (+13 ms)Running coprocessor post-close hooks at 1731522865015 (+6 ms)Closed at 1731522865015 2024-11-13T18:34:25,016 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731522814512.2cdf957bd75f8c509e5330db595fd178. 
2024-11-13T18:34:25,024 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/.tmp/ns/644d5cdd076d4e9893f4511c830410a1 is 43, key is default/ns:d/1731522814443/Put/seqid=0 2024-11-13T18:34:25,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741847_1023 (size=5153) 2024-11-13T18:34:25,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741847_1023 (size=5153) 2024-11-13T18:34:25,159 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T18:34:25,359 DEBUG [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T18:34:25,434 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/.tmp/ns/644d5cdd076d4e9893f4511c830410a1 2024-11-13T18:34:25,456 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/.tmp/table/22b96c379cd64f669b2bdba0eddc8a89 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731522814918/Put/seqid=0 2024-11-13T18:34:25,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741848_1024 (size=5508) 2024-11-13T18:34:25,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741848_1024 (size=5508) 2024-11-13T18:34:25,469 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/.tmp/table/22b96c379cd64f669b2bdba0eddc8a89 2024-11-13T18:34:25,475 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/.tmp/info/3305136e56cc4a28907b7b10f695e6f9 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/info/3305136e56cc4a28907b7b10f695e6f9 2024-11-13T18:34:25,481 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/info/3305136e56cc4a28907b7b10f695e6f9, entries=10, sequenceid=11, filesize=7.1 K 2024-11-13T18:34:25,482 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/.tmp/ns/644d5cdd076d4e9893f4511c830410a1 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/ns/644d5cdd076d4e9893f4511c830410a1 2024-11-13T18:34:25,487 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/ns/644d5cdd076d4e9893f4511c830410a1, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T18:34:25,488 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/.tmp/table/22b96c379cd64f669b2bdba0eddc8a89 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/table/22b96c379cd64f669b2bdba0eddc8a89 2024-11-13T18:34:25,494 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/table/22b96c379cd64f669b2bdba0eddc8a89, entries=2, sequenceid=11, filesize=5.4 K 2024-11-13T18:34:25,495 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 536ms, sequenceid=11, compaction requested=false 2024-11-13T18:34:25,503 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T18:34:25,504 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:34:25,504 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:34:25,504 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522864959Running coprocessor pre-close hooks at 1731522864959Disabling compacts and flushes for region at 1731522864959Disabling writes for close at 1731522864959Obtaining lock to block concurrent updates at 1731522864959Preparing flush snapshotting stores in 1588230740 at 1731522864959Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731522864961 (+2 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731522864961Flushing 1588230740/info: creating writer at 1731522864962 (+1 ms)Flushing 1588230740/info: appending metadata at 1731522864984 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731522864984Flushing 1588230740/ns: creating writer at 1731522865006 (+22 ms)Flushing 1588230740/ns: appending metadata at 1731522865023 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731522865023Flushing 
1588230740/table: creating writer at 1731522865441 (+418 ms)Flushing 1588230740/table: appending metadata at 1731522865456 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731522865456Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30899298: reopening flushed file at 1731522865474 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f5a7475: reopening flushed file at 1731522865481 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e3f293b: reopening flushed file at 1731522865488 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 536ms, sequenceid=11, compaction requested=false at 1731522865495 (+7 ms)Writing region close event to WAL at 1731522865496 (+1 ms)Running coprocessor post-close hooks at 1731522865504 (+8 ms)Closed at 1731522865504 2024-11-13T18:34:25,504 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T18:34:25,559 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(976): stopping server 39e84130bbc9,35433,1731522813569; all regions closed. 2024-11-13T18:34:25,561 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,561 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,561 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,561 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,561 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741834_1010 (size=3306) 2024-11-13T18:34:25,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741834_1010 (size=3306) 2024-11-13T18:34:25,571 DEBUG [RS:0;39e84130bbc9:35433 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/oldWALs 2024-11-13T18:34:25,571 INFO [RS:0;39e84130bbc9:35433 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C35433%2C1731522813569.meta:.meta(num 1731522814373) 2024-11-13T18:34:25,576 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,576 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,576 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,577 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,577 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:25,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741844_1020 (size=1252) 2024-11-13T18:34:25,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741844_1020 (size=1252) 2024-11-13T18:34:25,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:25,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:25,859 INFO [regionserver/39e84130bbc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:34:25,917 INFO [regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T18:34:25,917 INFO [regionserver/39e84130bbc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T18:34:25,988 DEBUG [RS:0;39e84130bbc9:35433 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/oldWALs 2024-11-13T18:34:25,988 INFO [RS:0;39e84130bbc9:35433 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C35433%2C1731522813569:(num 1731522864937) 2024-11-13T18:34:25,988 DEBUG [RS:0;39e84130bbc9:35433 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:34:25,988 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:34:25,988 INFO [RS:0;39e84130bbc9:35433 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:34:25,988 INFO [RS:0;39e84130bbc9:35433 {}] hbase.ChoreService(370): Chore service for: regionserver/39e84130bbc9:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T18:34:25,988 INFO [RS:0;39e84130bbc9:35433 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:34:25,988 INFO [RS:0;39e84130bbc9:35433 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35433 2024-11-13T18:34:25,989 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T18:34:25,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39e84130bbc9,35433,1731522813569 2024-11-13T18:34:25,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:34:25,991 INFO [RS:0;39e84130bbc9:35433 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:34:25,992 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39e84130bbc9,35433,1731522813569] 2024-11-13T18:34:25,993 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39e84130bbc9,35433,1731522813569 already deleted, retry=false 2024-11-13T18:34:25,993 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39e84130bbc9,35433,1731522813569 expired; onlineServers=0 2024-11-13T18:34:25,994 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39e84130bbc9,40397,1731522813506' ***** 2024-11-13T18:34:25,994 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T18:34:25,994 INFO [M:0;39e84130bbc9:40397 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:34:25,994 INFO [M:0;39e84130bbc9:40397 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:34:25,994 DEBUG [M:0;39e84130bbc9:40397 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T18:34:25,994 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-13T18:34:25,994 DEBUG [M:0;39e84130bbc9:40397 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T18:34:25,994 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522813770 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522813770,5,FailOnTimeoutGroup] 2024-11-13T18:34:25,994 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522813771 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522813771,5,FailOnTimeoutGroup] 2024-11-13T18:34:25,994 INFO [M:0;39e84130bbc9:40397 {}] hbase.ChoreService(370): Chore service for: master/39e84130bbc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T18:34:25,994 INFO [M:0;39e84130bbc9:40397 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:34:25,994 DEBUG [M:0;39e84130bbc9:40397 {}] master.HMaster(1795): Stopping service threads 2024-11-13T18:34:25,994 INFO [M:0;39e84130bbc9:40397 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T18:34:25,994 INFO [M:0;39e84130bbc9:40397 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:34:25,994 INFO [M:0;39e84130bbc9:40397 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T18:34:25,995 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-13T18:34:25,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T18:34:25,996 DEBUG [M:0;39e84130bbc9:40397 {}] zookeeper.ZKUtil(347): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T18:34:25,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:25,996 WARN [M:0;39e84130bbc9:40397 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T18:34:25,997 INFO [M:0;39e84130bbc9:40397 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/.lastflushedseqids 2024-11-13T18:34:26,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741849_1025 (size=130) 2024-11-13T18:34:26,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741849_1025 (size=130) 2024-11-13T18:34:26,014 INFO [M:0;39e84130bbc9:40397 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T18:34:26,014 INFO [M:0;39e84130bbc9:40397 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T18:34:26,014 DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:34:26,014 INFO [M:0;39e84130bbc9:40397 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:34:26,014 DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:34:26,014 DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:34:26,014 DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:34:26,014 INFO [M:0;39e84130bbc9:40397 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.94 KB 2024-11-13T18:34:26,042 DEBUG [M:0;39e84130bbc9:40397 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc15de3f79994edeb68b61076e04e4f6 is 82, key is hbase:meta,,1/info:regioninfo/1731522814420/Put/seqid=0 2024-11-13T18:34:26,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741850_1026 (size=5672) 2024-11-13T18:34:26,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741850_1026 (size=5672) 2024-11-13T18:34:26,056 INFO [M:0;39e84130bbc9:40397 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc15de3f79994edeb68b61076e04e4f6 2024-11-13T18:34:26,089 DEBUG [M:0;39e84130bbc9:40397 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b31c42845a6a41f1bf386ce8a17ca38b is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731522814923/Put/seqid=0 2024-11-13T18:34:26,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:34:26,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35433-0x100ed6052dc0001, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:34:26,093 INFO [RS:0;39e84130bbc9:35433 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:34:26,093 INFO [RS:0;39e84130bbc9:35433 {}] regionserver.HRegionServer(1031): Exiting; stopping=39e84130bbc9,35433,1731522813569; zookeeper connection closed. 
2024-11-13T18:34:26,094 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@51bfef5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@51bfef5 2024-11-13T18:34:26,094 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T18:34:26,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741851_1027 (size=7819) 2024-11-13T18:34:26,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741851_1027 (size=7819) 2024-11-13T18:34:26,103 INFO [M:0;39e84130bbc9:40397 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b31c42845a6a41f1bf386ce8a17ca38b 2024-11-13T18:34:26,109 INFO [M:0;39e84130bbc9:40397 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b31c42845a6a41f1bf386ce8a17ca38b 2024-11-13T18:34:26,132 DEBUG [M:0;39e84130bbc9:40397 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/70f1be7dc95d45829cd4314bfdcae4f9 is 69, key is 39e84130bbc9,35433,1731522813569/rs:state/1731522813812/Put/seqid=0 2024-11-13T18:34:26,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741852_1028 (size=5156) 2024-11-13T18:34:26,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741852_1028 (size=5156) 2024-11-13T18:34:26,138 INFO [M:0;39e84130bbc9:40397 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/70f1be7dc95d45829cd4314bfdcae4f9 2024-11-13T18:34:26,159 DEBUG [M:0;39e84130bbc9:40397 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7be0d0d7015c4091b8b0927108259337 is 52, key is load_balancer_on/state:d/1731522814506/Put/seqid=0 2024-11-13T18:34:26,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741853_1029 (size=5056) 2024-11-13T18:34:26,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741853_1029 (size=5056) 2024-11-13T18:34:26,166 INFO [M:0;39e84130bbc9:40397 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7be0d0d7015c4091b8b0927108259337 2024-11-13T18:34:26,172 DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc15de3f79994edeb68b61076e04e4f6 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc15de3f79994edeb68b61076e04e4f6 2024-11-13T18:34:26,180 INFO [M:0;39e84130bbc9:40397 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc15de3f79994edeb68b61076e04e4f6, entries=8, sequenceid=121, filesize=5.5 K 2024-11-13T18:34:26,181 DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b31c42845a6a41f1bf386ce8a17ca38b as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b31c42845a6a41f1bf386ce8a17ca38b 2024-11-13T18:34:26,187 INFO [M:0;39e84130bbc9:40397 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b31c42845a6a41f1bf386ce8a17ca38b 2024-11-13T18:34:26,187 INFO [M:0;39e84130bbc9:40397 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b31c42845a6a41f1bf386ce8a17ca38b, entries=14, sequenceid=121, filesize=7.6 K 2024-11-13T18:34:26,188 DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/70f1be7dc95d45829cd4314bfdcae4f9 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/70f1be7dc95d45829cd4314bfdcae4f9 2024-11-13T18:34:26,194 INFO [M:0;39e84130bbc9:40397 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/70f1be7dc95d45829cd4314bfdcae4f9, entries=1, sequenceid=121, filesize=5.0 K 2024-11-13T18:34:26,195 DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7be0d0d7015c4091b8b0927108259337 as hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7be0d0d7015c4091b8b0927108259337 2024-11-13T18:34:26,204 INFO [M:0;39e84130bbc9:40397 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43749/user/jenkins/test-data/3bc060f2-fbd0-4294-80ce-24b60480aaf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7be0d0d7015c4091b8b0927108259337, entries=1, sequenceid=121, filesize=4.9 K 2024-11-13T18:34:26,205 INFO [M:0;39e84130bbc9:40397 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 191ms, 
sequenceid=121, compaction requested=false 2024-11-13T18:34:26,209 INFO [M:0;39e84130bbc9:40397 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:34:26,209 DEBUG [M:0;39e84130bbc9:40397 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522866014Disabling compacts and flushes for region at 1731522866014Disabling writes for close at 1731522866014Obtaining lock to block concurrent updates at 1731522866014Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731522866014Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44599, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1731522866015 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731522866019 (+4 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731522866019Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731522866041 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731522866041Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731522866068 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731522866089 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731522866089Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731522866109 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731522866131 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731522866131Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731522866144 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731522866159 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731522866159Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79757f4: reopening flushed file at 1731522866171 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52625be1: reopening flushed file at 1731522866180 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7517cee2: reopening flushed file at 1731522866187 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4add007b: reopening flushed file at 1731522866194 (+7 ms)Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 191ms, sequenceid=121, compaction requested=false at 1731522866205 (+11 ms)Writing region close event to WAL at 1731522866209 (+4 ms)Closed at 1731522866209 2024-11-13T18:34:26,213 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:26,213 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:26,213 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:26,213 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:26,213 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:34:26,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34795 is added to blk_1073741830_1006 (size=52996) 2024-11-13T18:34:26,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38871 is added to blk_1073741830_1006 (size=52996) 
2024-11-13T18:34:26,621 INFO [M:0;39e84130bbc9:40397 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T18:34:26,621 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T18:34:26,622 INFO [M:0;39e84130bbc9:40397 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40397 2024-11-13T18:34:26,622 INFO [M:0;39e84130bbc9:40397 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:34:26,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:26,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:26,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:34:26,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40397-0x100ed6052dc0000, quorum=127.0.0.1:58025, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:34:26,724 INFO [M:0;39e84130bbc9:40397 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:34:26,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b3ac0d9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:34:26,730 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@144fb880{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:34:26,730 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:34:26,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22a4ff4e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:34:26,730 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43498b11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.log.dir/,STOPPED} 2024-11-13T18:34:26,732 WARN [BP-1180493070-172.17.0.3-1731522812652 heartbeating to 
localhost/127.0.0.1:43749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:34:26,732 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T18:34:26,732 WARN [BP-1180493070-172.17.0.3-1731522812652 heartbeating to localhost/127.0.0.1:43749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1180493070-172.17.0.3-1731522812652 (Datanode Uuid 7c38f4d0-ea7f-4e80-887b-f0d6c70658bf) service to localhost/127.0.0.1:43749 2024-11-13T18:34:26,732 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:34:26,733 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/data/data3/current/BP-1180493070-172.17.0.3-1731522812652 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:34:26,733 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/data/data4/current/BP-1180493070-172.17.0.3-1731522812652 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:34:26,733 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:34:26,741 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c020752{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:34:26,741 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ac52070{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:34:26,741 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:34:26,741 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fc56883{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:34:26,742 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1330929b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.log.dir/,STOPPED} 2024-11-13T18:34:26,744 WARN [BP-1180493070-172.17.0.3-1731522812652 heartbeating to localhost/127.0.0.1:43749 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:34:26,744 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:34:26,744 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:34:26,744 WARN [BP-1180493070-172.17.0.3-1731522812652 heartbeating to localhost/127.0.0.1:43749 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1180493070-172.17.0.3-1731522812652 (Datanode Uuid b4beaa62-e7fa-4c19-95a2-fd401bd2fc19) service to localhost/127.0.0.1:43749 2024-11-13T18:34:26,744 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/data/data1/current/BP-1180493070-172.17.0.3-1731522812652 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:34:26,745 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/cluster_88689959-f9a7-acb3-b9cb-c4d9eb482cf4/data/data2/current/BP-1180493070-172.17.0.3-1731522812652 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:34:26,745 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:34:26,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b612817{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:34:26,753 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35dc38db{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:34:26,753 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:34:26,753 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37ea919c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:34:26,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62558ec9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.log.dir/,STOPPED} 2024-11-13T18:34:26,760 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T18:34:26,777 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T18:34:26,786 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 181) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43749 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43749 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43749 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43749 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43749 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:43749 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=419 (was 379) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2986 (was 1145) - AvailableMemoryMB LEAK? 
- 2024-11-13T18:34:26,796 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=419, ProcessCount=11, AvailableMemoryMB=2986 2024-11-13T18:34:26,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T18:34:26,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.log.dir so I do NOT create it in target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7 2024-11-13T18:34:26,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/397e5369-68d2-f586-597b-429af27482c2/hadoop.tmp.dir so I do NOT create it in target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7 2024-11-13T18:34:26,796 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837, deleteOnExit=true 2024-11-13T18:34:26,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T18:34:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/test.cache.data in system properties and HBase conf 2024-11-13T18:34:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T18:34:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.log.dir in system properties and HBase conf 2024-11-13T18:34:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T18:34:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T18:34:26,797 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T18:34:26,797 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T18:34:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:34:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:34:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T18:34:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:34:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T18:34:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T18:34:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:34:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:34:26,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T18:34:26,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/nfs.dump.dir in system properties and HBase conf 2024-11-13T18:34:26,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/java.io.tmpdir in system properties and HBase conf 2024-11-13T18:34:26,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:34:26,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T18:34:26,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T18:34:26,818 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:34:26,902 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:34:26,908 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:34:26,914 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:34:26,914 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:34:26,914 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:34:26,917 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:34:26,921 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@540b6b2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:34:26,922 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51e0317a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:34:27,098 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e4041f6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/java.io.tmpdir/jetty-localhost-41875-hadoop-hdfs-3_4_1-tests_jar-_-any-6780467185230965771/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:34:27,098 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@c5b7d7a{HTTP/1.1, (http/1.1)}{localhost:41875} 2024-11-13T18:34:27,098 INFO [Time-limited test {}] server.Server(415): Started @243149ms 2024-11-13T18:34:27,119 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:34:27,236 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:34:27,238 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:34:27,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:34:27,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:34:27,244 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:34:27,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75d9b484{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:34:27,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@516e643a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:34:27,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7305dd28{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/java.io.tmpdir/jetty-localhost-33765-hadoop-hdfs-3_4_1-tests_jar-_-any-10675066025894452029/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:34:27,381 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f8818bb{HTTP/1.1, (http/1.1)}{localhost:33765} 2024-11-13T18:34:27,381 INFO [Time-limited test {}] server.Server(415): Started @243431ms 2024-11-13T18:34:27,382 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:34:27,426 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:34:27,430 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:34:27,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:34:27,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:34:27,433 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T18:34:27,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@412b5320{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:34:27,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3868302b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:34:27,483 WARN [Thread-1958 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/data/data1/current/BP-795142871-172.17.0.3-1731522866841/current, will proceed with Du for space computation calculation, 2024-11-13T18:34:27,484 WARN [Thread-1959 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/data/data2/current/BP-795142871-172.17.0.3-1731522866841/current, will proceed with Du for space computation calculation, 2024-11-13T18:34:27,535 WARN [Thread-1937 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:34:27,539 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ecf35d414f8ea0a with lease ID 0xb89b55d6618dac9: Processing first storage report for DS-821db67c-49d7-4b7d-9cd0-37bd0c08c930 from datanode DatanodeRegistration(127.0.0.1:35915, datanodeUuid=34ae6a58-a938-405e-892f-c45c46439678, infoPort=39469, infoSecurePort=0, ipcPort=44233, storageInfo=lv=-57;cid=testClusterID;nsid=2092060858;c=1731522866841) 2024-11-13T18:34:27,539 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ecf35d414f8ea0a with lease ID 0xb89b55d6618dac9: from storage DS-821db67c-49d7-4b7d-9cd0-37bd0c08c930 node DatanodeRegistration(127.0.0.1:35915, datanodeUuid=34ae6a58-a938-405e-892f-c45c46439678, infoPort=39469, infoSecurePort=0, ipcPort=44233, storageInfo=lv=-57;cid=testClusterID;nsid=2092060858;c=1731522866841), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:34:27,539 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ecf35d414f8ea0a with lease ID 0xb89b55d6618dac9: Processing first storage report for DS-b84dc935-adfa-4bc0-8103-f216ee788d5a from datanode DatanodeRegistration(127.0.0.1:35915, datanodeUuid=34ae6a58-a938-405e-892f-c45c46439678, infoPort=39469, infoSecurePort=0, ipcPort=44233, storageInfo=lv=-57;cid=testClusterID;nsid=2092060858;c=1731522866841) 2024-11-13T18:34:27,539 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ecf35d414f8ea0a with lease ID 0xb89b55d6618dac9: from storage DS-b84dc935-adfa-4bc0-8103-f216ee788d5a node DatanodeRegistration(127.0.0.1:35915, datanodeUuid=34ae6a58-a938-405e-892f-c45c46439678, infoPort=39469, infoSecurePort=0, ipcPort=44233, storageInfo=lv=-57;cid=testClusterID;nsid=2092060858;c=1731522866841), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:34:27,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@719b1e37{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/java.io.tmpdir/jetty-localhost-45033-hadoop-hdfs-3_4_1-tests_jar-_-any-6984262487310973336/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:34:27,615 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34accf12{HTTP/1.1, (http/1.1)}{localhost:45033} 2024-11-13T18:34:27,615 INFO [Time-limited test {}] server.Server(415): Started @243666ms 2024-11-13T18:34:27,617 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:34:27,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:27,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:27,743 WARN [Thread-1984 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/data/data3/current/BP-795142871-172.17.0.3-1731522866841/current, will proceed with Du for space computation calculation, 2024-11-13T18:34:27,743 WARN [Thread-1985 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/data/data4/current/BP-795142871-172.17.0.3-1731522866841/current, will proceed with Du for space computation calculation, 2024-11-13T18:34:27,778 WARN [Thread-1973 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:34:27,781 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7920afaa109fc8ef with lease ID 0xb89b55d6618daca: Processing first storage report for DS-c0902eb9-93ae-4a56-a386-9e64bd1547dd from datanode DatanodeRegistration(127.0.0.1:46371, datanodeUuid=3cf52934-28d0-4c6a-b632-d8a27dce4551, infoPort=43903, infoSecurePort=0, ipcPort=33107, storageInfo=lv=-57;cid=testClusterID;nsid=2092060858;c=1731522866841) 2024-11-13T18:34:27,781 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7920afaa109fc8ef with lease ID 0xb89b55d6618daca: from storage DS-c0902eb9-93ae-4a56-a386-9e64bd1547dd node DatanodeRegistration(127.0.0.1:46371, datanodeUuid=3cf52934-28d0-4c6a-b632-d8a27dce4551, infoPort=43903, infoSecurePort=0, ipcPort=33107, storageInfo=lv=-57;cid=testClusterID;nsid=2092060858;c=1731522866841), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:34:27,781 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7920afaa109fc8ef with lease ID 0xb89b55d6618daca: Processing first storage report for DS-8e26a2e8-198d-460f-abe7-af2c1f60a3c7 from datanode DatanodeRegistration(127.0.0.1:46371, datanodeUuid=3cf52934-28d0-4c6a-b632-d8a27dce4551, infoPort=43903, infoSecurePort=0, ipcPort=33107, storageInfo=lv=-57;cid=testClusterID;nsid=2092060858;c=1731522866841) 2024-11-13T18:34:27,781 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7920afaa109fc8ef with lease ID 0xb89b55d6618daca: from storage DS-8e26a2e8-198d-460f-abe7-af2c1f60a3c7 node DatanodeRegistration(127.0.0.1:46371, datanodeUuid=3cf52934-28d0-4c6a-b632-d8a27dce4551, infoPort=43903, infoSecurePort=0, ipcPort=33107, storageInfo=lv=-57;cid=testClusterID;nsid=2092060858;c=1731522866841), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:34:27,791 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7 2024-11-13T18:34:27,795 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/zookeeper_0, clientPort=61079, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T18:34:27,796 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61079 2024-11-13T18:34:27,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:34:27,798 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:34:27,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:34:27,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:34:27,809 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59 with version=8 2024-11-13T18:34:27,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/hbase-staging 2024-11-13T18:34:27,811 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:34:27,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:34:27,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:34:27,811 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:34:27,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:34:27,811 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:34:27,811 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T18:34:27,811 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:34:27,813 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43995 2024-11-13T18:34:27,815 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43995 connecting to ZooKeeper ensemble=127.0.0.1:61079 2024-11-13T18:34:27,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:439950x0, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:34:27,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43995-0x100ed6126fc0000 connected 2024-11-13T18:34:27,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:34:27,866 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:34:27,868 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:34:27,868 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59, hbase.cluster.distributed=false 2024-11-13T18:34:27,870 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:34:27,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43995 2024-11-13T18:34:27,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43995 2024-11-13T18:34:27,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43995 2024-11-13T18:34:27,881 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43995 2024-11-13T18:34:27,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43995 2024-11-13T18:34:27,902 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:34:27,902 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:34:27,902 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:34:27,902 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:34:27,902 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:34:27,902 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:34:27,902 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T18:34:27,902 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:34:27,903 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41111 2024-11-13T18:34:27,904 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41111 connecting to ZooKeeper ensemble=127.0.0.1:61079 2024-11-13T18:34:27,905 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:34:27,907 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:34:27,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:411110x0, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:34:27,928 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:411110x0, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:34:27,928 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T18:34:27,929 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41111-0x100ed6126fc0001 connected 2024-11-13T18:34:27,933 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T18:34:27,934 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T18:34:27,937 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:34:27,939 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41111 2024-11-13T18:34:27,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41111 2024-11-13T18:34:27,948 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41111 2024-11-13T18:34:27,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41111 2024-11-13T18:34:27,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41111 2024-11-13T18:34:27,968 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39e84130bbc9:43995 2024-11-13T18:34:27,968 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39e84130bbc9,43995,1731522867811 2024-11-13T18:34:27,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:34:27,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:34:27,971 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39e84130bbc9,43995,1731522867811 2024-11-13T18:34:27,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T18:34:27,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:27,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:27,974 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T18:34:27,975 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39e84130bbc9,43995,1731522867811 from backup master directory 2024-11-13T18:34:27,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39e84130bbc9,43995,1731522867811 2024-11-13T18:34:27,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:34:27,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:34:27,976 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:34:27,976 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39e84130bbc9,43995,1731522867811 2024-11-13T18:34:27,982 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/hbase.id] with ID: 952f3a03-13da-4e72-a9ff-ef6ad347546d 2024-11-13T18:34:27,982 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/.tmp/hbase.id 2024-11-13T18:34:28,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:34:28,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:34:28,016 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/.tmp/hbase.id]:[hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/hbase.id] 2024-11-13T18:34:28,032 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:34:28,032 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T18:34:28,034 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-13T18:34:28,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:28,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:28,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:34:28,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:34:28,058 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:34:28,059 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T18:34:28,061 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:34:28,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:34:28,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:34:28,483 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store 2024-11-13T18:34:28,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:34:28,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:34:28,494 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:34:28,494 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:34:28,494 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:34:28,494 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:34:28,494 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:34:28,494 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:34:28,494 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:34:28,494 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522868494Disabling compacts and flushes for region at 1731522868494Disabling writes for close at 1731522868494Writing region close event to WAL at 1731522868494Closed at 1731522868494 2024-11-13T18:34:28,495 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/.initializing 2024-11-13T18:34:28,495 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/WALs/39e84130bbc9,43995,1731522867811 2024-11-13T18:34:28,499 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C43995%2C1731522867811, suffix=, logDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/WALs/39e84130bbc9,43995,1731522867811, archiveDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/oldWALs, maxLogs=10 2024-11-13T18:34:28,499 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C43995%2C1731522867811.1731522868499 2024-11-13T18:34:28,505 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/WALs/39e84130bbc9,43995,1731522867811/39e84130bbc9%2C43995%2C1731522867811.1731522868499 2024-11-13T18:34:28,506 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39469:39469),(127.0.0.1/127.0.0.1:43903:43903)] 2024-11-13T18:34:28,513 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:34:28,514 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:34:28,514 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,514 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,519 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,520 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T18:34:28,520 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:28,521 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:34:28,521 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T18:34:28,522 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:28,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:34:28,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T18:34:28,524 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:28,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:34:28,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T18:34:28,525 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:28,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:34:28,526 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,527 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,527 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,528 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,528 DEBUG [master/39e84130bbc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,529 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T18:34:28,530 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:34:28,534 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:34:28,534 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861577, jitterRate=0.09555244445800781}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T18:34:28,535 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731522868514Initializing all the Stores at 1731522868515 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522868515Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522868518 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522868518Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522868518Cleaning up temporary data from old regions at 1731522868528 (+10 ms)Region opened successfully at 1731522868535 (+7 ms) 2024-11-13T18:34:28,535 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T18:34:28,539 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4caf62ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:34:28,540 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T18:34:28,540 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T18:34:28,540 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T18:34:28,540 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T18:34:28,541 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T18:34:28,542 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T18:34:28,542 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T18:34:28,555 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T18:34:28,556 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T18:34:28,558 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T18:34:28,558 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T18:34:28,559 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T18:34:28,561 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T18:34:28,561 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T18:34:28,562 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T18:34:28,563 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T18:34:28,564 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T18:34:28,566 DEBUG 
[master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T18:34:28,569 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T18:34:28,570 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T18:34:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:34:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:34:28,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:28,574 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39e84130bbc9,43995,1731522867811, sessionid=0x100ed6126fc0000, setting cluster-up flag (Was=false) 2024-11-13T18:34:28,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:28,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:28,582 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T18:34:28,583 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,43995,1731522867811 2024-11-13T18:34:28,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:28,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:28,592 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T18:34:28,594 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,43995,1731522867811 2024-11-13T18:34:28,596 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T18:34:28,597 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T18:34:28,598 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T18:34:28,598 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T18:34:28,598 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39e84130bbc9,43995,1731522867811 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T18:34:28,600 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:34:28,600 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:34:28,600 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:34:28,600 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:34:28,600 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39e84130bbc9:0, corePoolSize=10, maxPoolSize=10 2024-11-13T18:34:28,600 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,600 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:34:28,600 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T18:34:28,607 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:34:28,607 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T18:34:28,608 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:28,608 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T18:34:28,614 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731522898614 2024-11-13T18:34:28,614 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T18:34:28,614 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T18:34:28,614 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T18:34:28,614 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T18:34:28,614 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T18:34:28,614 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T18:34:28,614 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,615 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T18:34:28,615 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T18:34:28,615 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T18:34:28,616 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T18:34:28,616 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T18:34:28,616 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522868616,5,FailOnTimeoutGroup] 2024-11-13T18:34:28,617 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522868616,5,FailOnTimeoutGroup] 2024-11-13T18:34:28,617 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,617 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T18:34:28,617 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,617 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-13T18:34:28,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:34:28,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:34:28,625 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T18:34:28,625 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59 2024-11-13T18:34:28,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:34:28,636 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:34:28,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:34:28,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:34:28,639 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:34:28,639 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:28,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:34:28,640 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:34:28,641 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:34:28,641 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:28,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:34:28,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:34:28,643 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:34:28,643 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:28,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:34:28,644 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:34:28,645 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:34:28,645 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:28,646 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:34:28,648 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:34:28,649 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740 2024-11-13T18:34:28,650 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740 2024-11-13T18:34:28,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:34:28,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:34:28,652 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
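Editor's note: the repeated CompactionConfiguration entries above record the per-store compaction tuning in effect (minCompactSize 128 MB, min/max files 3/10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms). A minimal sketch, assuming the stock HBase configuration keys behind those values; the values simply mirror the log and nothing here is specific to this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        // Standard keys corresponding to the CompactionConfiguration values logged above.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period: 7 days in ms
        System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
    }
}
```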
2024-11-13T18:34:28,653 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:34:28,656 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:34:28,657 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695944, jitterRate=-0.11506186425685883}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:34:28,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731522868636Initializing all the Stores at 1731522868638 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522868638Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522868638Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522868638Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522868638Cleaning up temporary data from old regions at 1731522868651 (+13 ms)Region opened successfully at 1731522868657 (+6 ms) 2024-11-13T18:34:28,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:34:28,657 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:34:28,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:34:28,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:34:28,657 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:34:28,667 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(746): ClusterId : 952f3a03-13da-4e72-a9ff-ef6ad347546d 2024-11-13T18:34:28,667 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T18:34:28,670 DEBUG [RS:0;39e84130bbc9:41111 {}] 
procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T18:34:28,670 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T18:34:28,673 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T18:34:28,673 DEBUG [RS:0;39e84130bbc9:41111 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74586adc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:34:28,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:28,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:28,691 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:34:28,692 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522868657Disabling compacts and flushes for region at 1731522868657Disabling writes for close at 1731522868657Writing region close event to WAL at 1731522868691 (+34 ms)Closed at 1731522868691 2024-11-13T18:34:28,693 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:34:28,693 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T18:34:28,694 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T18:34:28,696 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:34:28,697 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T18:34:28,701 DEBUG [RS:0;39e84130bbc9:41111 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39e84130bbc9:41111 2024-11-13T18:34:28,701 INFO 
[RS:0;39e84130bbc9:41111 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T18:34:28,701 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T18:34:28,701 DEBUG [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T18:34:28,703 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(2659): reportForDuty to master=39e84130bbc9,43995,1731522867811 with port=41111, startcode=1731522867902 2024-11-13T18:34:28,703 DEBUG [RS:0;39e84130bbc9:41111 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T18:34:28,705 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56415, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T18:34:28,706 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43995 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39e84130bbc9,41111,1731522867902 2024-11-13T18:34:28,706 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43995 {}] master.ServerManager(517): Registering regionserver=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:28,707 DEBUG [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59 2024-11-13T18:34:28,707 DEBUG [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41889 2024-11-13T18:34:28,707 DEBUG [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T18:34:28,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:34:28,710 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39e84130bbc9,41111,1731522867902] 2024-11-13T18:34:28,711 DEBUG [RS:0;39e84130bbc9:41111 {}] zookeeper.ZKUtil(111): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39e84130bbc9,41111,1731522867902 2024-11-13T18:34:28,711 WARN [RS:0;39e84130bbc9:41111 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T18:34:28,711 INFO [RS:0;39e84130bbc9:41111 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:34:28,711 DEBUG [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902 2024-11-13T18:34:28,716 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T18:34:28,721 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T18:34:28,723 INFO [RS:0;39e84130bbc9:41111 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T18:34:28,723 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,723 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T18:34:28,724 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T18:34:28,725 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:34:28,725 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:34:28,726 DEBUG [RS:0;39e84130bbc9:41111 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:34:28,733 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,733 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,733 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,733 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,733 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,733 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,41111,1731522867902-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:34:28,756 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T18:34:28,756 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,41111,1731522867902-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,756 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:28,757 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.Replication(171): 39e84130bbc9,41111,1731522867902 started 2024-11-13T18:34:28,773 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
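Editor's note: the many "Chore ScheduledChore name=..., period=..., unit=... is enabled" entries come from periodic tasks scheduled on the region server's ChoreService. A minimal sketch, assuming HBase's ScheduledChore/ChoreService classes, of what such a periodic task looks like; the chore name and period here are invented for illustration and the Stoppable is a trivial stand-in for the real server lifecycle.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        // Trivial Stoppable so the chore can be cancelled, as the real master/RS services do.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };

        // Hypothetical chore running every 1000 ms, analogous to the CompactionChecker entry above.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
            @Override protected void chore() {
                System.out.println("chore tick");
            }
        };

        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore); // produces the "Chore ScheduledChore ... is enabled." style log line
        Thread.sleep(3000);
        stopper.stop("done");
        service.shutdown();
    }
}
```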
2024-11-13T18:34:28,773 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(1482): Serving as 39e84130bbc9,41111,1731522867902, RpcServer on 39e84130bbc9/172.17.0.3:41111, sessionid=0x100ed6126fc0001 2024-11-13T18:34:28,773 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T18:34:28,773 DEBUG [RS:0;39e84130bbc9:41111 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39e84130bbc9,41111,1731522867902 2024-11-13T18:34:28,773 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,41111,1731522867902' 2024-11-13T18:34:28,773 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T18:34:28,774 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T18:34:28,774 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T18:34:28,774 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T18:34:28,774 DEBUG [RS:0;39e84130bbc9:41111 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39e84130bbc9,41111,1731522867902 2024-11-13T18:34:28,774 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,41111,1731522867902' 2024-11-13T18:34:28,774 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T18:34:28,775 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T18:34:28,775 DEBUG [RS:0;39e84130bbc9:41111 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T18:34:28,775 INFO [RS:0;39e84130bbc9:41111 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T18:34:28,775 INFO [RS:0;39e84130bbc9:41111 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T18:34:28,847 WARN [39e84130bbc9:43995 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-13T18:34:28,879 INFO [RS:0;39e84130bbc9:41111 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C41111%2C1731522867902, suffix=, logDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902, archiveDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/oldWALs, maxLogs=32 2024-11-13T18:34:28,880 INFO [RS:0;39e84130bbc9:41111 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C41111%2C1731522867902.1731522868880 2024-11-13T18:34:28,891 INFO [RS:0;39e84130bbc9:41111 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902/39e84130bbc9%2C41111%2C1731522867902.1731522868880 2024-11-13T18:34:28,899 DEBUG [RS:0;39e84130bbc9:41111 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43903:43903),(127.0.0.1/127.0.0.1:39469:39469)] 2024-11-13T18:34:29,098 DEBUG [39e84130bbc9:43995 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T18:34:29,098 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:29,100 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,41111,1731522867902, state=OPENING 2024-11-13T18:34:29,102 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T18:34:29,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:29,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:34:29,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:34:29,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:34:29,105 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:34:29,105 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,41111,1731522867902}] 2024-11-13T18:34:29,266 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T18:34:29,282 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57931, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T18:34:29,293 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T18:34:29,294 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:34:29,296 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C41111%2C1731522867902.meta, suffix=.meta, logDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902, archiveDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/oldWALs, maxLogs=32 2024-11-13T18:34:29,297 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C41111%2C1731522867902.meta.1731522869297.meta 2024-11-13T18:34:29,303 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902/39e84130bbc9%2C41111%2C1731522867902.meta.1731522869297.meta 2024-11-13T18:34:29,314 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43903:43903),(127.0.0.1/127.0.0.1:39469:39469)] 2024-11-13T18:34:29,335 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:34:29,335 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T18:34:29,336 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T18:34:29,336 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
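Editor's note: the entries above show hbase:meta's descriptor carrying coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|' and the RegionCoprocessorHost loading it when the region opens. A minimal sketch, assuming the public TableDescriptorBuilder API, of attaching the same endpoint class to a hypothetical user table descriptor; the table and family names are made up.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorOnDescriptor {
    public static void main(String[] args) throws Exception {
        // Attach the MultiRowMutationEndpoint coprocessor (seen on hbase:meta above)
        // to a hypothetical table descriptor via the client API.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))            // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info")) // hypothetical family
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td.getCoprocessorDescriptors());
    }
}
```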
2024-11-13T18:34:29,336 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T18:34:29,336 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:34:29,336 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T18:34:29,336 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T18:34:29,353 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:34:29,362 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:34:29,362 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:29,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:34:29,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:34:29,363 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:34:29,363 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:29,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:34:29,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:34:29,365 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:34:29,365 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:29,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:34:29,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:34:29,366 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:34:29,366 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:29,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T18:34:29,368 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:34:29,368 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740 2024-11-13T18:34:29,370 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740 2024-11-13T18:34:29,371 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:34:29,371 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:34:29,373 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:34:29,375 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:34:29,376 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732181, jitterRate=-0.06898452341556549}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:34:29,376 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T18:34:29,376 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731522869336Writing region info on filesystem at 1731522869336Initializing all the Stores at 1731522869347 (+11 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522869348 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522869349 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522869349Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522869349Cleaning up temporary data from old regions at 1731522869371 (+22 ms)Running coprocessor post-open hooks at 1731522869376 (+5 ms)Region opened successfully at 1731522869376 2024-11-13T18:34:29,380 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731522869265 2024-11-13T18:34:29,389 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T18:34:29,389 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T18:34:29,390 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:29,391 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,41111,1731522867902, state=OPEN 2024-11-13T18:34:29,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:34:29,396 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:34:29,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:34:29,397 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:29,397 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:34:29,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T18:34:29,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,41111,1731522867902 in 292 msec 2024-11-13T18:34:29,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T18:34:29,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 707 msec 2024-11-13T18:34:29,438 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:34:29,438 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T18:34:29,440 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:34:29,440 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,41111,1731522867902, seqNum=-1] 2024-11-13T18:34:29,441 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:34:29,442 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36885, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:34:29,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 850 msec 2024-11-13T18:34:29,448 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731522869448, completionTime=-1 2024-11-13T18:34:29,449 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T18:34:29,449 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T18:34:29,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T18:34:29,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731522929451 2024-11-13T18:34:29,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731522989451 2024-11-13T18:34:29,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T18:34:29,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43995,1731522867811-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:29,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43995,1731522867811-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:29,451 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43995,1731522867811-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:29,452 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39e84130bbc9:43995, period=300000, unit=MILLISECONDS is enabled. 
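Editor's note: several entries above ("Start fetching meta region location from registry", "The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1]") record the connection registry lookup for hbase:meta. A minimal sketch, assuming the public Connection/RegionLocator client API, of the equivalent client-side lookup; the configuration is whatever the running (mini) cluster hands out and is not shown in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationLookup {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // points at the running cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Which region server currently hosts hbase:meta, as the
            // "fetched meta region location" log lines report.
            HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " on " + loc.getServerName());
        }
    }
}
```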
2024-11-13T18:34:29,452 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:29,453 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T18:34:29,454 DEBUG [master/39e84130bbc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T18:34:29,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.481sec 2024-11-13T18:34:29,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T18:34:29,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T18:34:29,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T18:34:29,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T18:34:29,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T18:34:29,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43995,1731522867811-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:34:29,457 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43995,1731522867811-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T18:34:29,461 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T18:34:29,461 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T18:34:29,461 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43995,1731522867811-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T18:34:29,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32e48481, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:34:29,469 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39e84130bbc9,43995,-1 for getting cluster id 2024-11-13T18:34:29,470 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T18:34:29,474 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '952f3a03-13da-4e72-a9ff-ef6ad347546d' 2024-11-13T18:34:29,474 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T18:34:29,474 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "952f3a03-13da-4e72-a9ff-ef6ad347546d" 2024-11-13T18:34:29,474 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62f0f668, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:34:29,474 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39e84130bbc9,43995,-1] 2024-11-13T18:34:29,475 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T18:34:29,475 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:34:29,476 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60376, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T18:34:29,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ea28eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:34:29,477 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:34:29,478 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,41111,1731522867902, seqNum=-1] 2024-11-13T18:34:29,479 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:34:29,480 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47498, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:34:29,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39e84130bbc9,43995,1731522867811 2024-11-13T18:34:29,482 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:34:29,485 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T18:34:29,486 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T18:34:29,487 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 39e84130bbc9,43995,1731522867811 2024-11-13T18:34:29,487 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7bd08b43 2024-11-13T18:34:29,487 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T18:34:29,489 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60392, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T18:34:29,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43995 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T18:34:29,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43995 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-13T18:34:29,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43995 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:34:29,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43995 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-13T18:34:29,494 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T18:34:29,494 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:29,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43995 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-13T18:34:29,495 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T18:34:29,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43995 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T18:34:29,515 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741835_1011 (size=381) 2024-11-13T18:34:29,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741835_1011 (size=381) 2024-11-13T18:34:29,518 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4d4bed97268a796b28eb74cdec559a63, NAME => 'TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59 2024-11-13T18:34:29,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741836_1012 (size=64) 2024-11-13T18:34:29,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741836_1012 (size=64) 2024-11-13T18:34:29,526 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:34:29,526 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 4d4bed97268a796b28eb74cdec559a63, disabling compactions & flushes 2024-11-13T18:34:29,526 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:29,527 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:29,527 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. after waiting 0 ms 2024-11-13T18:34:29,527 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:29,527 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 
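The create-table request logged by the master above (pid=4: table 'TestLogRolling-testLogRolling', a single 'info' family, MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, both flagged as too small by TableDescriptorChecker) corresponds roughly to the following client-side call. This is a minimal sketch against the public Admin API, not the test's actual code; the Connection passed in is assumed to already point at the mini cluster.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestTableSketch {
  // Builds a descriptor like the one the master logs for pid=4 and submits it,
  // which the master stores as a CreateTableProcedure. 'conn' is assumed to be
  // an open Connection to the mini cluster.
  static void create(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)        // VERSIONS => '1'
                  .setBlocksize(65536)      // BLOCKSIZE => '65536'
                  .build())
              .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE "too small" warning
              .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE "too small" warning
              .build());
    }
  }
}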
2024-11-13T18:34:29,527 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4d4bed97268a796b28eb74cdec559a63: Waiting for close lock at 1731522869526Disabling compacts and flushes for region at 1731522869526Disabling writes for close at 1731522869527 (+1 ms)Writing region close event to WAL at 1731522869527Closed at 1731522869527 2024-11-13T18:34:29,528 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T18:34:29,529 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731522869528"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731522869528"}]},"ts":"1731522869528"} 2024-11-13T18:34:29,532 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T18:34:29,533 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T18:34:29,533 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522869533"}]},"ts":"1731522869533"} 2024-11-13T18:34:29,536 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-13T18:34:29,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d4bed97268a796b28eb74cdec559a63, ASSIGN}] 2024-11-13T18:34:29,538 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d4bed97268a796b28eb74cdec559a63, ASSIGN 2024-11-13T18:34:29,539 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d4bed97268a796b28eb74cdec559a63, ASSIGN; state=OFFLINE, location=39e84130bbc9,41111,1731522867902; forceNewPlan=false, retain=false 2024-11-13T18:34:29,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:29,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:29,690 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4d4bed97268a796b28eb74cdec559a63, regionState=OPENING, regionLocation=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:29,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d4bed97268a796b28eb74cdec559a63, ASSIGN because future has completed 2024-11-13T18:34:29,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902}] 2024-11-13T18:34:29,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T18:34:29,861 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 
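The repeated RecoverLeaseFSUtils warnings above come from a Close-WAL-Writer thread that keeps retrying lease recovery on WAL files left by an earlier cluster instance while the underlying DFSClient has already been closed, hence "Filesystem closed" on each attempt, roughly once per second. The sketch below shows the general shape of such a recover-then-poll loop using the public DistributedFileSystem API; it is a simplified illustration under that assumption, not the actual RecoverLeaseFSUtils code, which (as the stack trace shows) reaches isFileClosed reflectively for compatibility with older HDFS clients.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Ask the NameNode to recover the lease on a WAL file, then poll until the
  // file is reported closed or the deadline passes. If the DFSClient behind
  // 'dfs' is already closed, every poll throws IOException("Filesystem closed"),
  // which is what the warnings above are reporting.
  static boolean recoverWalLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    if (dfs.recoverLease(wal)) {
      return true;                 // lease recovered immediately
    }
    while (System.currentTimeMillis() < deadline) {
      if (dfs.isFileClosed(wal)) { // the call the utility reaches via reflection
        return true;
      }
      Thread.sleep(1000L);         // the log shows roughly one retry per second
    }
    return false;
  }
}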
2024-11-13T18:34:29,861 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4d4bed97268a796b28eb74cdec559a63, NAME => 'TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:34:29,861 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,861 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:34:29,862 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,862 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,871 INFO [StoreOpener-4d4bed97268a796b28eb74cdec559a63-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,874 INFO [StoreOpener-4d4bed97268a796b28eb74cdec559a63-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4d4bed97268a796b28eb74cdec559a63 columnFamilyName info 2024-11-13T18:34:29,874 DEBUG [StoreOpener-4d4bed97268a796b28eb74cdec559a63-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:29,875 INFO [StoreOpener-4d4bed97268a796b28eb74cdec559a63-1 {}] regionserver.HStore(327): Store=4d4bed97268a796b28eb74cdec559a63/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:34:29,877 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,878 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,879 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,881 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,882 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,890 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,893 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:34:29,893 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4d4bed97268a796b28eb74cdec559a63; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800798, jitterRate=0.018267497420310974}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T18:34:29,893 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:29,894 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4d4bed97268a796b28eb74cdec559a63: Running coprocessor pre-open hook at 1731522869862Writing region info on filesystem at 1731522869862Initializing all the Stores at 1731522869866 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522869866Cleaning up temporary data from old regions at 1731522869882 (+16 ms)Running coprocessor post-open hooks at 1731522869893 (+11 ms)Region opened successfully at 1731522869894 (+1 ms) 2024-11-13T18:34:29,896 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., pid=6, masterSystemTime=1731522869850 2024-11-13T18:34:29,915 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 
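With the region open journal above complete and the assignment procedures about to finish, the test can begin writing to the table; writes of the shape below are what drive the WAL rolling under test (and, given the SteppingSplitPolicy with desiredMaxFileSize=800798 logged above, eventually a split). This is a hypothetical illustration, not the test's code: the row key, qualifier, and value are invented, and the Connection is again assumed to be open.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class WriteSketch {
  // Writes one cell into the 'info' family of the newly opened region.
  // Each such put is appended to the region server's WAL before it is
  // acknowledged, so a stream of them exercises the log-rolling path.
  static void writeOneRow(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      Put put = new Put(Bytes.toBytes("row-0000"));              // hypothetical row key
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"),   // family from the descriptor above
          Bytes.toBytes("value-0000"));                          // hypothetical value
      table.put(put);
    }
  }
}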
2024-11-13T18:34:29,915 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:29,926 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4d4bed97268a796b28eb74cdec559a63, regionState=OPEN, openSeqNum=2, regionLocation=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:29,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 because future has completed 2024-11-13T18:34:29,957 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T18:34:29,957 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 in 248 msec 2024-11-13T18:34:29,977 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T18:34:29,977 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d4bed97268a796b28eb74cdec559a63, ASSIGN in 421 msec 2024-11-13T18:34:29,979 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T18:34:29,979 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731522869979"}]},"ts":"1731522869979"} 2024-11-13T18:34:29,982 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-13T18:34:29,983 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T18:34:29,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 493 msec 2024-11-13T18:34:30,016 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,016 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,017 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,018 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,557 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T18:34:30,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,559 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,559 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:30,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:30,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:31,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:34:31,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:32,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:32,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:34:33,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:33,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:34,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:34:34,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:34,716 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T18:34:34,717 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-13T18:34:35,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-13T18:34:35,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-13T18:34:39,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43995 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T18:34:39,593 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-13T18:34:39,593 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-13T18:34:39,596 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-13T18:34:39,596 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:39,599 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2] 2024-11-13T18:34:39,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:39,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4d4bed97268a796b28eb74cdec559a63 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:34:39,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/05ce2e598d25406381976fb05cb031fb is 1080, key is row0001/info:/1731522879601/Put/seqid=0 2024-11-13T18:34:39,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741837_1013 (size=12509) 2024-11-13T18:34:39,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/05ce2e598d25406381976fb05cb031fb 2024-11-13T18:34:39,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741837_1013 (size=12509) 2024-11-13T18:34:39,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/05ce2e598d25406381976fb05cb031fb as
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/05ce2e598d25406381976fb05cb031fb 2024-11-13T18:34:39,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T18:34:39,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:47498 deadline: 1731522889660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:39,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/05ce2e598d25406381976fb05cb031fb, entries=7, sequenceid=11, filesize=12.2 K 2024-11-13T18:34:39,678 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 4d4bed97268a796b28eb74cdec559a63 in 64ms, sequenceid=11, compaction requested=false 2024-11-13T18:34:39,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4d4bed97268a796b28eb74cdec559a63: 2024-11-13T18:34:39,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-13T18:34:39,695 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T18:34:39,696 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T18:34:39,696 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update
region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2 because the exception is null or not the one we care about 2024-11-13T18:34:39,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-13T18:34:39,793 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:41,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:41,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:42,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:42,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:43,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:43,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:44,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:44,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:45,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:45,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:46,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:46,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-13T18:34:49,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:49,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:49,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4d4bed97268a796b28eb74cdec559a63 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-13T18:34:49,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/c29e44aaedea400980ea36bdb93a137e is 1080, key is row0008/info:/1731522879615/Put/seqid=0 2024-11-13T18:34:49,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741838_1014 (size=29761) 2024-11-13T18:34:49,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/c29e44aaedea400980ea36bdb93a137e 2024-11-13T18:34:49,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741838_1014 (size=29761) 2024-11-13T18:34:49,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/c29e44aaedea400980ea36bdb93a137e as 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c29e44aaedea400980ea36bdb93a137e 2024-11-13T18:34:49,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c29e44aaedea400980ea36bdb93a137e, entries=23, sequenceid=37, filesize=29.1 K 2024-11-13T18:34:49,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 4d4bed97268a796b28eb74cdec559a63 in 26ms, sequenceid=37, compaction requested=false 2024-11-13T18:34:49,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4d4bed97268a796b28eb74cdec559a63: 2024-11-13T18:34:49,782 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-13T18:34:49,782 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:49,782 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c29e44aaedea400980ea36bdb93a137e because midkey is the same as first or last row 2024-11-13T18:34:50,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-13T18:34:51,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:51,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4d4bed97268a796b28eb74cdec559a63 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:34:51,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/16b123c895e9471bab35d37b77420a9c is 1080, key is row0031/info:/1731522889756/Put/seqid=0 2024-11-13T18:34:51,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741839_1015 (size=12509) 2024-11-13T18:34:51,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741839_1015 (size=12509) 2024-11-13T18:34:51,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/16b123c895e9471bab35d37b77420a9c 2024-11-13T18:34:51,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/16b123c895e9471bab35d37b77420a9c as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/16b123c895e9471bab35d37b77420a9c 2024-11-13T18:34:51,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/16b123c895e9471bab35d37b77420a9c, entries=7, sequenceid=47, filesize=12.2 K 2024-11-13T18:34:51,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for 4d4bed97268a796b28eb74cdec559a63 in 57ms, sequenceid=47, compaction requested=true 2024-11-13T18:34:51,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4d4bed97268a796b28eb74cdec559a63: 2024-11-13T18:34:51,832 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-13T18:34:51,832 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:51,832 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c29e44aaedea400980ea36bdb93a137e because midkey is the same as first or last row 2024-11-13T18:34:51,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d4bed97268a796b28eb74cdec559a63:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:34:51,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:34:51,832 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:34:51,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:51,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4d4bed97268a796b28eb74cdec559a63 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-11-13T18:34:51,834 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:34:51,834 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1541): 4d4bed97268a796b28eb74cdec559a63/info is initiating minor compaction (all files) 2024-11-13T18:34:51,834 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4d4bed97268a796b28eb74cdec559a63/info in TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 
2024-11-13T18:34:51,835 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/05ce2e598d25406381976fb05cb031fb, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c29e44aaedea400980ea36bdb93a137e, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/16b123c895e9471bab35d37b77420a9c] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp, totalSize=53.5 K 2024-11-13T18:34:51,835 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 05ce2e598d25406381976fb05cb031fb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731522879601 2024-11-13T18:34:51,836 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting c29e44aaedea400980ea36bdb93a137e, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731522879615 2024-11-13T18:34:51,836 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 16b123c895e9471bab35d37b77420a9c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731522889756 2024-11-13T18:34:51,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/ea029192dc8f4cbfb27a99fc27d1de86 is 1080, key is row0038/info:/1731522891779/Put/seqid=0 2024-11-13T18:34:51,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741840_1016 (size=27607) 2024-11-13T18:34:51,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741840_1016 (size=27607) 2024-11-13T18:34:51,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=71 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/ea029192dc8f4cbfb27a99fc27d1de86 2024-11-13T18:34:51,855 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d4bed97268a796b28eb74cdec559a63#info#compaction#60 average throughput is 12.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:34:51,856 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/f210b334d77d401ca47e01c93777c881 is 1080, key is row0001/info:/1731522879601/Put/seqid=0 2024-11-13T18:34:51,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/ea029192dc8f4cbfb27a99fc27d1de86 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/ea029192dc8f4cbfb27a99fc27d1de86 2024-11-13T18:34:51,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/ea029192dc8f4cbfb27a99fc27d1de86, entries=21, sequenceid=71, filesize=27.0 K 2024-11-13T18:34:51,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=5.25 KB/5380 for 4d4bed97268a796b28eb74cdec559a63 in 40ms, sequenceid=71, compaction requested=false 2024-11-13T18:34:51,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4d4bed97268a796b28eb74cdec559a63: 2024-11-13T18:34:51,874 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=80.5 K, sizeToCheck=16.0 K 2024-11-13T18:34:51,874 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:51,874 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c29e44aaedea400980ea36bdb93a137e because midkey is the same as first or last row 2024-11-13T18:34:51,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741841_1017 (size=44978) 2024-11-13T18:34:51,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741841_1017 (size=44978) 2024-11-13T18:34:51,905 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/f210b334d77d401ca47e01c93777c881 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881 2024-11-13T18:34:51,915 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4d4bed97268a796b28eb74cdec559a63/info of 4d4bed97268a796b28eb74cdec559a63 into f210b334d77d401ca47e01c93777c881(size=43.9 K), total size for store is 70.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T18:34:51,915 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4d4bed97268a796b28eb74cdec559a63: 2024-11-13T18:34:51,915 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., storeName=4d4bed97268a796b28eb74cdec559a63/info, priority=13, startTime=1731522891832; duration=0sec 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=70.9 K, sizeToCheck=16.0 K 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881 because midkey is the same as first or last row 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=70.9 K, sizeToCheck=16.0 K 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881 because midkey is the same as first or last row 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=70.9 K, sizeToCheck=16.0 K 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881 because midkey is the same as first or last row 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:34:51,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d4bed97268a796b28eb74cdec559a63:info 2024-11-13T18:34:52,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-13T18:34:53,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-13T18:34:53,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:53,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4d4bed97268a796b28eb74cdec559a63 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:34:53,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/7bfceb75186546b5acf104d2cebad281 is 1080, key is row0059/info:/1731522891835/Put/seqid=0 2024-11-13T18:34:53,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741842_1018 (size=12509) 2024-11-13T18:34:53,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741842_1018 (size=12509) 2024-11-13T18:34:53,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/7bfceb75186546b5acf104d2cebad281 2024-11-13T18:34:53,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/7bfceb75186546b5acf104d2cebad281 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/7bfceb75186546b5acf104d2cebad281 2024-11-13T18:34:53,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/7bfceb75186546b5acf104d2cebad281, entries=7, sequenceid=82, filesize=12.2 K 2024-11-13T18:34:53,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 4d4bed97268a796b28eb74cdec559a63 in 38ms, sequenceid=82, compaction requested=true 2024-11-13T18:34:53,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4d4bed97268a796b28eb74cdec559a63: 2024-11-13T18:34:53,916 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-13T18:34:53,916 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:53,916 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881 because midkey is the same as first or last row 2024-11-13T18:34:53,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4d4bed97268a796b28eb74cdec559a63:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-13T18:34:53,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:34:53,916 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:34:53,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:53,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4d4bed97268a796b28eb74cdec559a63 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-13T18:34:53,918 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:34:53,918 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1541): 4d4bed97268a796b28eb74cdec559a63/info is initiating minor compaction (all files) 2024-11-13T18:34:53,918 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4d4bed97268a796b28eb74cdec559a63/info in TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:53,918 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/ea029192dc8f4cbfb27a99fc27d1de86, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/7bfceb75186546b5acf104d2cebad281] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp, totalSize=83.1 K 2024-11-13T18:34:53,918 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting f210b334d77d401ca47e01c93777c881, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731522879601 2024-11-13T18:34:53,919 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting ea029192dc8f4cbfb27a99fc27d1de86, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1731522891779 2024-11-13T18:34:53,920 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7bfceb75186546b5acf104d2cebad281, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731522891835 2024-11-13T18:34:53,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/1f5b4e2088054d599bcb66b966cc4ab0 is 1080, key is row0066/info:/1731522893879/Put/seqid=0 
2024-11-13T18:34:53,942 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4d4bed97268a796b28eb74cdec559a63#info#compaction#63 average throughput is 16.67 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:34:53,943 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/c9641bea28174078830054e84a2bcd6e is 1080, key is row0001/info:/1731522879601/Put/seqid=0 2024-11-13T18:34:53,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741843_1019 (size=20064) 2024-11-13T18:34:53,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/1f5b4e2088054d599bcb66b966cc4ab0 2024-11-13T18:34:53,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741843_1019 (size=20064) 2024-11-13T18:34:53,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-13T18:34:53,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:47498 deadline: 1731522903959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:53,960 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T18:34:53,961 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T18:34:53,961 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2 because the exception is null or not the one we care about 2024-11-13T18:34:53,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/1f5b4e2088054d599bcb66b966cc4ab0 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/1f5b4e2088054d599bcb66b966cc4ab0 2024-11-13T18:34:53,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/1f5b4e2088054d599bcb66b966cc4ab0, entries=14, sequenceid=99, filesize=19.6 K 2024-11-13T18:34:53,970 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for 4d4bed97268a796b28eb74cdec559a63 in 53ms, sequenceid=99, compaction requested=false 2024-11-13T18:34:53,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4d4bed97268a796b28eb74cdec559a63: 2024-11-13T18:34:53,970 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-11-13T18:34:53,970 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:53,970 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881 because midkey is the same as first or last row 2024-11-13T18:34:53,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741844_1020 (size=75378) 2024-11-13T18:34:53,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741844_1020 (size=75378) 2024-11-13T18:34:53,981 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/c9641bea28174078830054e84a2bcd6e as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c9641bea28174078830054e84a2bcd6e 2024-11-13T18:34:53,990 INFO 
[RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4d4bed97268a796b28eb74cdec559a63/info of 4d4bed97268a796b28eb74cdec559a63 into c9641bea28174078830054e84a2bcd6e(size=73.6 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T18:34:53,990 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4d4bed97268a796b28eb74cdec559a63: 2024-11-13T18:34:53,990 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., storeName=4d4bed97268a796b28eb74cdec559a63/info, priority=13, startTime=1731522893916; duration=0sec 2024-11-13T18:34:53,990 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-13T18:34:53,990 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:53,990 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-13T18:34:53,990 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:53,990 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-13T18:34:53,990 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T18:34:53,992 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:34:53,992 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:34:53,992 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4d4bed97268a796b28eb74cdec559a63:info 2024-11-13T18:34:53,995 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43995 {}] assignment.AssignmentManager(1355): Split request from 39e84130bbc9,41111,1731522867902, parent={ENCODED => 4d4bed97268a796b28eb74cdec559a63, NAME => 'TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-13T18:34:54,002 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43995 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:54,007 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43995 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4d4bed97268a796b28eb74cdec559a63, 
daughterA=36e76e28ea43bc826997975f0d3d11c5, daughterB=910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,009 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4d4bed97268a796b28eb74cdec559a63, daughterA=36e76e28ea43bc826997975f0d3d11c5, daughterB=910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,009 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4d4bed97268a796b28eb74cdec559a63, daughterA=36e76e28ea43bc826997975f0d3d11c5, daughterB=910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,009 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4d4bed97268a796b28eb74cdec559a63, daughterA=36e76e28ea43bc826997975f0d3d11c5, daughterB=910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d4bed97268a796b28eb74cdec559a63, UNASSIGN}] 2024-11-13T18:34:54,019 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d4bed97268a796b28eb74cdec559a63, UNASSIGN 2024-11-13T18:34:54,021 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=4d4bed97268a796b28eb74cdec559a63, regionState=CLOSING, regionLocation=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:54,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d4bed97268a796b28eb74cdec559a63, UNASSIGN because future has completed 2024-11-13T18:34:54,024 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-13T18:34:54,024 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902}] 2024-11-13T18:34:54,181 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,181 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-13T18:34:54,182 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 4d4bed97268a796b28eb74cdec559a63, disabling compactions & flushes 2024-11-13T18:34:54,182 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:54,182 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:54,182 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. after waiting 0 ms 2024-11-13T18:34:54,182 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:54,182 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 4d4bed97268a796b28eb74cdec559a63 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-13T18:34:54,186 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/be46a4724ee74b33b396eaade6ecda14 is 1080, key is row0080/info:/1731522893918/Put/seqid=0 2024-11-13T18:34:54,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741845_1021 (size=21141) 2024-11-13T18:34:54,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741845_1021 (size=21141) 2024-11-13T18:34:54,192 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/be46a4724ee74b33b396eaade6ecda14 2024-11-13T18:34:54,198 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/.tmp/info/be46a4724ee74b33b396eaade6ecda14 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/be46a4724ee74b33b396eaade6ecda14 2024-11-13T18:34:54,204 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/be46a4724ee74b33b396eaade6ecda14, entries=15, sequenceid=118, filesize=20.6 K 2024-11-13T18:34:54,206 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for 4d4bed97268a796b28eb74cdec559a63 in 
24ms, sequenceid=118, compaction requested=true 2024-11-13T18:34:54,207 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/05ce2e598d25406381976fb05cb031fb, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c29e44aaedea400980ea36bdb93a137e, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/16b123c895e9471bab35d37b77420a9c, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/ea029192dc8f4cbfb27a99fc27d1de86, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/7bfceb75186546b5acf104d2cebad281] to archive 2024-11-13T18:34:54,208 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T18:34:54,210 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/05ce2e598d25406381976fb05cb031fb to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/05ce2e598d25406381976fb05cb031fb 2024-11-13T18:34:54,211 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c29e44aaedea400980ea36bdb93a137e to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c29e44aaedea400980ea36bdb93a137e 2024-11-13T18:34:54,212 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/f210b334d77d401ca47e01c93777c881 2024-11-13T18:34:54,213 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/16b123c895e9471bab35d37b77420a9c to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/16b123c895e9471bab35d37b77420a9c 2024-11-13T18:34:54,214 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/ea029192dc8f4cbfb27a99fc27d1de86 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/ea029192dc8f4cbfb27a99fc27d1de86 2024-11-13T18:34:54,215 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/7bfceb75186546b5acf104d2cebad281 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/7bfceb75186546b5acf104d2cebad281 2024-11-13T18:34:54,221 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=1 2024-11-13T18:34:54,222 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 2024-11-13T18:34:54,222 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 4d4bed97268a796b28eb74cdec559a63: Waiting for close lock at 1731522894182Running coprocessor pre-close hooks at 1731522894182Disabling compacts and flushes for region at 1731522894182Disabling writes for close at 1731522894182Obtaining lock to block concurrent updates at 1731522894182Preparing flush snapshotting stores in 4d4bed97268a796b28eb74cdec559a63 at 1731522894182Finished memstore snapshotting TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., syncing WAL and waiting on mvcc, flushsize=dataSize=16140, getHeapSize=17520, getOffHeapSize=0, getCellsCount=15 at 1731522894182Flushing stores of TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 
at 1731522894183 (+1 ms)Flushing 4d4bed97268a796b28eb74cdec559a63/info: creating writer at 1731522894183Flushing 4d4bed97268a796b28eb74cdec559a63/info: appending metadata at 1731522894186 (+3 ms)Flushing 4d4bed97268a796b28eb74cdec559a63/info: closing flushed file at 1731522894186Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2efd6ce1: reopening flushed file at 1731522894197 (+11 ms)Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=0 B/0 for 4d4bed97268a796b28eb74cdec559a63 in 24ms, sequenceid=118, compaction requested=true at 1731522894206 (+9 ms)Writing region close event to WAL at 1731522894218 (+12 ms)Running coprocessor post-close hooks at 1731522894222 (+4 ms)Closed at 1731522894222 2024-11-13T18:34:54,225 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,226 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=4d4bed97268a796b28eb74cdec559a63, regionState=CLOSED 2024-11-13T18:34:54,228 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 because future has completed 2024-11-13T18:34:54,232 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-13T18:34:54,232 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 4d4bed97268a796b28eb74cdec559a63, server=39e84130bbc9,41111,1731522867902 in 206 msec 2024-11-13T18:34:54,235 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-13T18:34:54,235 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4d4bed97268a796b28eb74cdec559a63, UNASSIGN in 214 msec 2024-11-13T18:34:54,245 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:54,250 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=4d4bed97268a796b28eb74cdec559a63, threads=3 2024-11-13T18:34:54,252 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/1f5b4e2088054d599bcb66b966cc4ab0 for region: 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,252 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/be46a4724ee74b33b396eaade6ecda14 for region: 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,252 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c9641bea28174078830054e84a2bcd6e for region: 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,269 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/1f5b4e2088054d599bcb66b966cc4ab0, top=true 2024-11-13T18:34:54,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741846_1022 (size=27) 2024-11-13T18:34:54,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741846_1022 (size=27) 2024-11-13T18:34:54,274 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/be46a4724ee74b33b396eaade6ecda14, top=true 2024-11-13T18:34:54,277 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-1f5b4e2088054d599bcb66b966cc4ab0 for child: 910931266edfbb972760e2de1949a1f7, parent: 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,278 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/1f5b4e2088054d599bcb66b966cc4ab0 for region: 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,284 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-be46a4724ee74b33b396eaade6ecda14 for child: 910931266edfbb972760e2de1949a1f7, parent: 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,284 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/be46a4724ee74b33b396eaade6ecda14 for region: 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741847_1023 (size=27) 2024-11-13T18:34:54,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741847_1023 (size=27) 2024-11-13T18:34:54,295 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c9641bea28174078830054e84a2bcd6e for region: 4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:34:54,297 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 4d4bed97268a796b28eb74cdec559a63 Daughter A: [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63] storefiles, Daughter B: [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-1f5b4e2088054d599bcb66b966cc4ab0, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-be46a4724ee74b33b396eaade6ecda14, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63] storefiles. 2024-11-13T18:34:54,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741848_1024 (size=71) 2024-11-13T18:34:54,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741848_1024 (size=71) 2024-11-13T18:34:54,320 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:54,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741849_1025 (size=71) 2024-11-13T18:34:54,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741849_1025 (size=71) 2024-11-13T18:34:54,357 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:54,367 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-11-13T18:34:54,370 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/recovered.edits/121.seqid, newMaxSeqId=121, maxSeqId=-1 2024-11-13T18:34:54,373 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731522894372"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731522894372"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731522894372"}]},"ts":"1731522894372"} 2024-11-13T18:34:54,373 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731522894372"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731522894372"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731522894372"}]},"ts":"1731522894372"} 2024-11-13T18:34:54,373 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731522894372"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731522894372"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731522894372"}]},"ts":"1731522894372"} 2024-11-13T18:34:54,397 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36e76e28ea43bc826997975f0d3d11c5, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=910931266edfbb972760e2de1949a1f7, ASSIGN}] 2024-11-13T18:34:54,402 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36e76e28ea43bc826997975f0d3d11c5, ASSIGN 2024-11-13T18:34:54,403 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=910931266edfbb972760e2de1949a1f7, ASSIGN 2024-11-13T18:34:54,403 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36e76e28ea43bc826997975f0d3d11c5, ASSIGN; state=SPLITTING_NEW, location=39e84130bbc9,41111,1731522867902; forceNewPlan=false, retain=false 2024-11-13T18:34:54,404 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=910931266edfbb972760e2de1949a1f7, ASSIGN; state=SPLITTING_NEW, location=39e84130bbc9,41111,1731522867902; forceNewPlan=false, retain=false 2024-11-13T18:34:54,554 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=910931266edfbb972760e2de1949a1f7, regionState=OPENING, regionLocation=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:54,554 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta 
row=36e76e28ea43bc826997975f0d3d11c5, regionState=OPENING, regionLocation=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:54,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=910931266edfbb972760e2de1949a1f7, ASSIGN because future has completed 2024-11-13T18:34:54,557 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 910931266edfbb972760e2de1949a1f7, server=39e84130bbc9,41111,1731522867902}] 2024-11-13T18:34:54,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36e76e28ea43bc826997975f0d3d11c5, ASSIGN because future has completed 2024-11-13T18:34:54,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 36e76e28ea43bc826997975f0d3d11c5, server=39e84130bbc9,41111,1731522867902}] 2024-11-13T18:34:54,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:54,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:54,716 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 
2024-11-13T18:34:54,717 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 36e76e28ea43bc826997975f0d3d11c5, NAME => 'TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-13T18:34:54,717 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,717 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:34:54,717 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,717 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,719 INFO [StoreOpener-36e76e28ea43bc826997975f0d3d11c5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,720 INFO [StoreOpener-36e76e28ea43bc826997975f0d3d11c5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 36e76e28ea43bc826997975f0d3d11c5 columnFamilyName info 2024-11-13T18:34:54,720 DEBUG [StoreOpener-36e76e28ea43bc826997975f0d3d11c5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:54,735 DEBUG [StoreOpener-36e76e28ea43bc826997975f0d3d11c5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63->hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c9641bea28174078830054e84a2bcd6e-bottom 2024-11-13T18:34:54,735 INFO [StoreOpener-36e76e28ea43bc826997975f0d3d11c5-1 {}] regionserver.HStore(327): Store=36e76e28ea43bc826997975f0d3d11c5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:34:54,736 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,737 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,738 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,739 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,739 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,741 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,742 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 36e76e28ea43bc826997975f0d3d11c5; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689900, jitterRate=-0.12274797260761261}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T18:34:54,742 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:34:54,743 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 36e76e28ea43bc826997975f0d3d11c5: Running coprocessor pre-open hook at 1731522894717Writing region info on filesystem at 1731522894717Initializing all the Stores at 1731522894718 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522894718Cleaning up temporary data from old regions at 1731522894739 (+21 ms)Running coprocessor post-open hooks at 1731522894742 (+3 ms)Region opened successfully at 1731522894743 (+1 ms) 2024-11-13T18:34:54,744 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5., pid=13, masterSystemTime=1731522894712 2024-11-13T18:34:54,744 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for 
store 36e76e28ea43bc826997975f0d3d11c5:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:34:54,745 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:34:54,745 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-13T18:34:54,747 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 2024-11-13T18:34:54,747 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1541): 36e76e28ea43bc826997975f0d3d11c5/info is initiating minor compaction (all files) 2024-11-13T18:34:54,747 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 36e76e28ea43bc826997975f0d3d11c5/info in TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 2024-11-13T18:34:54,747 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63->hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c9641bea28174078830054e84a2bcd6e-bottom] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/.tmp, totalSize=73.6 K 2024-11-13T18:34:54,748 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 2024-11-13T18:34:54,748 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 2024-11-13T18:34:54,748 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 
2024-11-13T18:34:54,748 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731522879601 2024-11-13T18:34:54,748 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 910931266edfbb972760e2de1949a1f7, NAME => 'TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-13T18:34:54,749 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=36e76e28ea43bc826997975f0d3d11c5, regionState=OPEN, openSeqNum=122, regionLocation=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:54,749 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,749 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:34:54,749 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,749 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,750 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43995 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=39e84130bbc9,41111,1731522867902, table=TestLogRolling-testLogRolling, region=36e76e28ea43bc826997975f0d3d11c5. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-13T18:34:54,752 INFO [StoreOpener-910931266edfbb972760e2de1949a1f7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,752 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-13T18:34:54,752 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-13T18:34:54,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-13T18:34:54,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 36e76e28ea43bc826997975f0d3d11c5, server=39e84130bbc9,41111,1731522867902 because future has completed 2024-11-13T18:34:54,756 INFO [StoreOpener-910931266edfbb972760e2de1949a1f7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 910931266edfbb972760e2de1949a1f7 columnFamilyName info 2024-11-13T18:34:54,757 DEBUG [StoreOpener-910931266edfbb972760e2de1949a1f7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:34:54,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-13T18:34:54,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 36e76e28ea43bc826997975f0d3d11c5, server=39e84130bbc9,41111,1731522867902 in 193 msec 2024-11-13T18:34:54,759 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=36e76e28ea43bc826997975f0d3d11c5, ASSIGN in 360 msec 2024-11-13T18:34:54,783 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 36e76e28ea43bc826997975f0d3d11c5#info#compaction#65 average throughput is 10.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:34:54,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/info/700f5dd63d8141cd992520753e0143d1 is 193, key is TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7./info:regioninfo/1731522894554/Put/seqid=0 2024-11-13T18:34:54,787 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/.tmp/info/6b5198ec24d84a1985a44577cef5f6f3 is 1080, key is row0001/info:/1731522879601/Put/seqid=0 2024-11-13T18:34:54,790 DEBUG [StoreOpener-910931266edfbb972760e2de1949a1f7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-1f5b4e2088054d599bcb66b966cc4ab0 2024-11-13T18:34:54,795 DEBUG [StoreOpener-910931266edfbb972760e2de1949a1f7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-be46a4724ee74b33b396eaade6ecda14 2024-11-13T18:34:54,802 DEBUG [StoreOpener-910931266edfbb972760e2de1949a1f7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63->hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c9641bea28174078830054e84a2bcd6e-top 2024-11-13T18:34:54,803 INFO [StoreOpener-910931266edfbb972760e2de1949a1f7-1 {}] regionserver.HStore(327): Store=910931266edfbb972760e2de1949a1f7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:34:54,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741850_1026 (size=70862) 2024-11-13T18:34:54,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741851_1027 (size=9847) 2024-11-13T18:34:54,805 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741851_1027 (size=9847) 2024-11-13T18:34:54,806 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,806 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741850_1026 (size=70862) 2024-11-13T18:34:54,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/info/700f5dd63d8141cd992520753e0143d1 2024-11-13T18:34:54,808 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,811 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,811 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,816 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,820 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 910931266edfbb972760e2de1949a1f7; next sequenceid=122; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755131, jitterRate=-0.039802104234695435}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T18:34:54,820 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 910931266edfbb972760e2de1949a1f7 2024-11-13T18:34:54,820 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 910931266edfbb972760e2de1949a1f7: Running coprocessor pre-open hook at 1731522894749Writing region info on filesystem at 1731522894749Initializing all the Stores at 1731522894751 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522894751Cleaning up temporary data from old regions at 1731522894811 (+60 ms)Running coprocessor post-open hooks at 1731522894820 (+9 ms)Region opened successfully at 1731522894820 2024-11-13T18:34:54,821 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7., pid=12, masterSystemTime=1731522894712 2024-11-13T18:34:54,822 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 910931266edfbb972760e2de1949a1f7:info, priority=-2147483648, current under compaction store size is 2 2024-11-13T18:34:54,822 DEBUG 
[RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-13T18:34:54,822 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:34:54,825 INFO [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:34:54,825 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.HStore(1541): 910931266edfbb972760e2de1949a1f7/info is initiating minor compaction (all files) 2024-11-13T18:34:54,825 INFO [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 910931266edfbb972760e2de1949a1f7/info in TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:34:54,826 INFO [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63->hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c9641bea28174078830054e84a2bcd6e-top, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-1f5b4e2088054d599bcb66b966cc4ab0, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-be46a4724ee74b33b396eaade6ecda14] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp, totalSize=113.9 K 2024-11-13T18:34:54,827 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] compactions.Compactor(225): Compacting c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731522879601 2024-11-13T18:34:54,827 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/.tmp/info/6b5198ec24d84a1985a44577cef5f6f3 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/info/6b5198ec24d84a1985a44577cef5f6f3 2024-11-13T18:34:54,828 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-1f5b4e2088054d599bcb66b966cc4ab0, keycount=14, bloomtype=ROW, size=19.6 K, 
encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731522893879 2024-11-13T18:34:54,828 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-be46a4724ee74b33b396eaade6ecda14, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731522893918 2024-11-13T18:34:54,830 DEBUG [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:34:54,830 INFO [RS_OPEN_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:34:54,831 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=910931266edfbb972760e2de1949a1f7, regionState=OPEN, openSeqNum=122, regionLocation=39e84130bbc9,41111,1731522867902 2024-11-13T18:34:54,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 910931266edfbb972760e2de1949a1f7, server=39e84130bbc9,41111,1731522867902 because future has completed 2024-11-13T18:34:54,840 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 36e76e28ea43bc826997975f0d3d11c5/info of 36e76e28ea43bc826997975f0d3d11c5 into 6b5198ec24d84a1985a44577cef5f6f3(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T18:34:54,840 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 36e76e28ea43bc826997975f0d3d11c5: 2024-11-13T18:34:54,840 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5., storeName=36e76e28ea43bc826997975f0d3d11c5/info, priority=15, startTime=1731522894744; duration=0sec 2024-11-13T18:34:54,840 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:34:54,840 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 36e76e28ea43bc826997975f0d3d11c5:info 2024-11-13T18:34:54,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-13T18:34:54,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 910931266edfbb972760e2de1949a1f7, server=39e84130bbc9,41111,1731522867902 in 290 msec 2024-11-13T18:34:54,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-13T18:34:54,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=910931266edfbb972760e2de1949a1f7, ASSIGN in 453 msec 2024-11-13T18:34:54,858 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=4d4bed97268a796b28eb74cdec559a63, daughterA=36e76e28ea43bc826997975f0d3d11c5, daughterB=910931266edfbb972760e2de1949a1f7 in 852 msec 2024-11-13T18:34:54,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/ns/88cdd08cb97e444bb7a37d41020d9d13 is 43, key is default/ns:d/1731522869443/Put/seqid=0 2024-11-13T18:34:54,884 INFO [RS:0;39e84130bbc9:41111-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 910931266edfbb972760e2de1949a1f7#info#compaction#68 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:34:54,885 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d25438f1914c4278af2fcc7b6b03d655 is 1080, key is row0062/info:/1731522891858/Put/seqid=0 2024-11-13T18:34:54,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741852_1028 (size=5153) 2024-11-13T18:34:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741852_1028 (size=5153) 2024-11-13T18:34:54,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/ns/88cdd08cb97e444bb7a37d41020d9d13 2024-11-13T18:34:54,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741853_1029 (size=40830) 2024-11-13T18:34:54,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741853_1029 (size=40830) 2024-11-13T18:34:54,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/table/983b82b7663c418fac5678a9b69a0ed2 is 65, key is TestLogRolling-testLogRolling/table:state/1731522869979/Put/seqid=0 2024-11-13T18:34:54,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741854_1030 (size=5340) 2024-11-13T18:34:54,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741854_1030 (size=5340) 2024-11-13T18:34:55,326 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d25438f1914c4278af2fcc7b6b03d655 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d25438f1914c4278af2fcc7b6b03d655 2024-11-13T18:34:55,335 INFO [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 910931266edfbb972760e2de1949a1f7/info of 910931266edfbb972760e2de1949a1f7 into d25438f1914c4278af2fcc7b6b03d655(size=39.9 K), total size for store is 39.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T18:34:55,335 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:34:55,335 INFO [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7., storeName=910931266edfbb972760e2de1949a1f7/info, priority=13, startTime=1731522894822; duration=0sec 2024-11-13T18:34:55,336 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:34:55,336 DEBUG [RS:0;39e84130bbc9:41111-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 910931266edfbb972760e2de1949a1f7:info 2024-11-13T18:34:55,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/table/983b82b7663c418fac5678a9b69a0ed2 2024-11-13T18:34:55,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/info/700f5dd63d8141cd992520753e0143d1 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/info/700f5dd63d8141cd992520753e0143d1 2024-11-13T18:34:55,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/info/700f5dd63d8141cd992520753e0143d1, entries=30, sequenceid=17, filesize=9.6 K 2024-11-13T18:34:55,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/ns/88cdd08cb97e444bb7a37d41020d9d13 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/ns/88cdd08cb97e444bb7a37d41020d9d13 2024-11-13T18:34:55,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/ns/88cdd08cb97e444bb7a37d41020d9d13, entries=2, sequenceid=17, filesize=5.0 K 2024-11-13T18:34:55,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/table/983b82b7663c418fac5678a9b69a0ed2 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/table/983b82b7663c418fac5678a9b69a0ed2 2024-11-13T18:34:55,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/table/983b82b7663c418fac5678a9b69a0ed2, entries=2, sequenceid=17, filesize=5.2 K 2024-11-13T18:34:55,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 668ms, sequenceid=17, compaction 
requested=false 2024-11-13T18:34:55,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T18:34:55,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:55,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:56,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:56,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:57,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:57,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:57,792 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T18:34:58,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:58,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:59,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,224 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,225 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,248 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:59,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:34:59,767 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T18:34:59,769 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,769 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,769 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,769 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,769 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:34:59,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:00,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:00,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:01,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:35:01,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:02,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:02,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:35:03,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:03,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:47498 deadline: 1731522914012, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. is not online on 39e84130bbc9,41111,1731522867902 2024-11-13T18:35:04,014 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. is not online on 39e84130bbc9,41111,1731522867902 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T18:35:04,014 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63. 
is not online on 39e84130bbc9,41111,1731522867902 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T18:35:04,014 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731522869490.4d4bed97268a796b28eb74cdec559a63., hostname=39e84130bbc9,41111,1731522867902, seqNum=2 from cache 2024-11-13T18:35:04,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:35:04,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:05,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:05,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:35:06,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:06,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:07,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:35:07,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:08,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:08,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:35:09,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:09,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:10,080 INFO [master/39e84130bbc9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T18:35:10,080 INFO [master/39e84130bbc9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T18:35:10,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:10,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:11,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-13T18:35:11,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
[2024-11-13T18:35:11,703 through 2024-11-13T18:35:25,712: Close-WAL-Writer-0 logged the same WARN util.RecoverLeaseFSUtils(258) "Failed invocation" message with an identical java.lang.reflect.InvocationTargetException / java.io.IOException: Filesystem closed stack trace roughly once per second, alternating between hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta and hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994; the duplicate traces are elided here.]
2024-11-13T18:35:14,336 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340
2024-11-13T18:35:24,040 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0095', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7., hostname=39e84130bbc9,41111,1731522867902, seqNum=122]
11 more 2024-11-13T18:35:26,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:26,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:35:26,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/8ccf3c4fe9564f608897118e379e7fa4 is 1080, key is row0095/info:/1731522924041/Put/seqid=0 2024-11-13T18:35:26,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741855_1031 (size=12513) 2024-11-13T18:35:26,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741855_1031 (size=12513) 2024-11-13T18:35:26,070 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/8ccf3c4fe9564f608897118e379e7fa4 2024-11-13T18:35:26,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/8ccf3c4fe9564f608897118e379e7fa4 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/8ccf3c4fe9564f608897118e379e7fa4 2024-11-13T18:35:26,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/8ccf3c4fe9564f608897118e379e7fa4, entries=7, sequenceid=132, filesize=12.2 K 2024-11-13T18:35:26,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 910931266edfbb972760e2de1949a1f7 in 30ms, sequenceid=132, compaction requested=false 2024-11-13T18:35:26,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:26,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-13T18:35:26,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d87d1bf5bc7741dda02f38af0e83c037 is 1080, key is row0102/info:/1731522926055/Put/seqid=0 2024-11-13T18:35:26,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to 
blk_1073741856_1032 (size=21156) 2024-11-13T18:35:26,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741856_1032 (size=21156) 2024-11-13T18:35:26,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=150 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d87d1bf5bc7741dda02f38af0e83c037 2024-11-13T18:35:26,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d87d1bf5bc7741dda02f38af0e83c037 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d87d1bf5bc7741dda02f38af0e83c037 2024-11-13T18:35:26,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d87d1bf5bc7741dda02f38af0e83c037, entries=15, sequenceid=150, filesize=20.7 K 2024-11-13T18:35:26,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for 910931266edfbb972760e2de1949a1f7 in 42ms, sequenceid=150, compaction requested=true 2024-11-13T18:35:26,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:26,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 910931266edfbb972760e2de1949a1f7:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:35:26,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:26,128 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:35:26,129 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74499 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:35:26,129 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1541): 910931266edfbb972760e2de1949a1f7/info is initiating minor compaction (all files) 2024-11-13T18:35:26,129 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 910931266edfbb972760e2de1949a1f7/info in TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 
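The flush above leaves the store with three HFiles, and the flusher immediately queues a minor compaction ("compaction requested=true", then "Selecting compaction from 3 store files ... selected 3 files of size 74499"). As a rough, standalone illustration of that trigger only, and not HBase's actual compaction policy code, the sketch below requests a compaction once a store holds at least a configurable minimum number of files (3 in this run); the class, method names and threshold are assumptions for illustration, while the three file sizes are the ones reported in the log.

    // Illustrative sketch only: a post-flush check that queues a compaction once a
    // store holds at least MIN_FILES_TO_COMPACT HFiles. Names and the threshold are
    // assumptions, not HBase internals; the sizes below come from the log above.
    import java.util.Arrays;
    import java.util.List;

    public class FlushCompactionTriggerSketch {
        static final int MIN_FILES_TO_COMPACT = 3; // matches the 3 eligible files in the log

        static boolean shouldRequestCompaction(List<Long> storeFileSizes) {
            return storeFileSizes.size() >= MIN_FILES_TO_COMPACT;
        }

        public static void main(String[] args) {
            // The three selected files: 39.9 K + 12.2 K + 20.7 K, 74,499 bytes in total.
            List<Long> files = Arrays.asList(40_830L, 12_513L, 21_156L);
            long total = 0;
            for (long size : files) {
                total += size;
            }
            System.out.println("eligible files=" + files.size() + ", total bytes=" + total);
            System.out.println("request compaction? " + shouldRequestCompaction(files));
        }
    }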
2024-11-13T18:35:26,129 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d25438f1914c4278af2fcc7b6b03d655, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/8ccf3c4fe9564f608897118e379e7fa4, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d87d1bf5bc7741dda02f38af0e83c037] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp, totalSize=72.8 K 2024-11-13T18:35:26,130 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting d25438f1914c4278af2fcc7b6b03d655, keycount=33, bloomtype=ROW, size=39.9 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731522891858 2024-11-13T18:35:26,130 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8ccf3c4fe9564f608897118e379e7fa4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731522924041 2024-11-13T18:35:26,130 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting d87d1bf5bc7741dda02f38af0e83c037, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1731522926055 2024-11-13T18:35:26,146 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 910931266edfbb972760e2de1949a1f7#info#compaction#72 average throughput is 14.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:35:26,147 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/11cbc91dbbf14d21b6a6fcece9056d89 is 1080, key is row0062/info:/1731522891858/Put/seqid=0 2024-11-13T18:35:26,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741857_1033 (size=64713) 2024-11-13T18:35:26,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741857_1033 (size=64713) 2024-11-13T18:35:26,165 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/11cbc91dbbf14d21b6a6fcece9056d89 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/11cbc91dbbf14d21b6a6fcece9056d89 2024-11-13T18:35:26,173 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 910931266edfbb972760e2de1949a1f7/info of 910931266edfbb972760e2de1949a1f7 into 11cbc91dbbf14d21b6a6fcece9056d89(size=63.2 K), total size for store is 63.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T18:35:26,173 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:26,173 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7., storeName=910931266edfbb972760e2de1949a1f7/info, priority=13, startTime=1731522926127; duration=0sec 2024-11-13T18:35:26,173 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:26,173 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 910931266edfbb972760e2de1949a1f7:info 2024-11-13T18:35:26,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:26,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:27,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:27,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:27,792 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
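The recurring WARN traces above ("Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed") and the NoSuchFieldException note for HBASE-27595 are both reflection artifacts. The stack trace itself shows that RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed through Method.invoke, and Method.invoke always wraps whatever the target throws in an InvocationTargetException, so the real cause (the DFSClient for the already-shut-down cluster at hdfs://localhost:34359 has been closed) only appears as the "Caused by". A minimal standalone sketch of that wrapping behaviour, using plain JDK reflection and a hypothetical stand-in class rather than the Hadoop APIs:

    // Minimal sketch: Method.invoke wraps exceptions thrown by the target in
    // InvocationTargetException, which is why the log shows the IOException only as
    // the "Caused by". FakeFs is hypothetical and stands in for the HDFS client here.
    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveInvokeSketch {
        public static class FakeFs {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed");
            }
        }

        public static void main(String[] args) throws Exception {
            FakeFs fs = new FakeFs();
            Method m = FakeFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(fs, "/some/wal/file");
            } catch (InvocationTargetException e) {
                // e.getCause() is the original IOException("Filesystem closed").
                System.out.println("wrapped cause: " + e.getCause());
            }
        }
    }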
2024-11-13T18:35:28,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:28,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-13T18:35:28,115 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/90584e3f19da4813a41d15a73723ba1f is 1080, key is row0117/info:/1731522926087/Put/seqid=0 2024-11-13T18:35:28,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741858_1034 (size=19000) 2024-11-13T18:35:28,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741858_1034 (size=19000) 2024-11-13T18:35:28,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/90584e3f19da4813a41d15a73723ba1f 2024-11-13T18:35:28,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/90584e3f19da4813a41d15a73723ba1f as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/90584e3f19da4813a41d15a73723ba1f 2024-11-13T18:35:28,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/90584e3f19da4813a41d15a73723ba1f, entries=13, sequenceid=167, filesize=18.6 K 2024-11-13T18:35:28,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=9.46 KB/9684 for 910931266edfbb972760e2de1949a1f7 in 27ms, sequenceid=167, compaction requested=false 2024-11-13T18:35:28,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:28,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:28,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-13T18:35:28,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/dcb2ef422f884829b1098a2511fd02da is 1080, key is row0130/info:/1731522928110/Put/seqid=0 2024-11-13T18:35:28,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to 
blk_1073741859_1035 (size=16828) 2024-11-13T18:35:28,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741859_1035 (size=16828) 2024-11-13T18:35:28,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/dcb2ef422f884829b1098a2511fd02da 2024-11-13T18:35:28,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/dcb2ef422f884829b1098a2511fd02da as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/dcb2ef422f884829b1098a2511fd02da 2024-11-13T18:35:28,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/dcb2ef422f884829b1098a2511fd02da, entries=11, sequenceid=181, filesize=16.4 K 2024-11-13T18:35:28,161 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for 910931266edfbb972760e2de1949a1f7 in 23ms, sequenceid=181, compaction requested=true 2024-11-13T18:35:28,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:28,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 910931266edfbb972760e2de1949a1f7:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:35:28,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:28,162 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:35:28,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:28,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-13T18:35:28,163 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100541 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:35:28,163 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1541): 910931266edfbb972760e2de1949a1f7/info is initiating minor compaction (all files) 2024-11-13T18:35:28,163 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 910931266edfbb972760e2de1949a1f7/info in TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 
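Each flush in this stretch follows the same two-step pattern: the new HFile is first written under the region's .tmp directory and only then "committed" by moving it into the store's info directory, so a reader never picks up a half-written file. The following is a hedged sketch of that write-then-rename idiom against the Hadoop FileSystem API; the paths are placeholders and this is not HRegionFileSystem's actual implementation.

    // Hedged sketch of the write-to-.tmp-then-rename idiom behind the
    // "Committing .../.tmp/info/... as .../info/..." DEBUG lines. Paths are placeholders.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
        public static void main(String[] args) throws Exception {
            // Default configuration; the test in the log points this at hdfs://localhost:41889.
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);

            Path tmpFile  = new Path("/region/.tmp/info/example-hfile");
            Path destFile = new Path("/region/info/example-hfile");

            // 1. Write the whole file under .tmp first.
            try (FSDataOutputStream out = fs.create(tmpFile)) {
                out.writeBytes("flushed cells would go here");
            }

            // 2. "Commit": move the finished file into the store directory in one step.
            if (!fs.rename(tmpFile, destFile)) {
                throw new java.io.IOException("rename failed: " + tmpFile + " -> " + destFile);
            }
        }
    }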
2024-11-13T18:35:28,163 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/11cbc91dbbf14d21b6a6fcece9056d89, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/90584e3f19da4813a41d15a73723ba1f, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/dcb2ef422f884829b1098a2511fd02da] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp, totalSize=98.2 K 2024-11-13T18:35:28,164 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 11cbc91dbbf14d21b6a6fcece9056d89, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1731522891858 2024-11-13T18:35:28,164 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 90584e3f19da4813a41d15a73723ba1f, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731522926087 2024-11-13T18:35:28,164 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting dcb2ef422f884829b1098a2511fd02da, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1731522928110 2024-11-13T18:35:28,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/9790b003edd54fdb8b898dae4d2b49e1 is 1080, key is row0141/info:/1731522928139/Put/seqid=0 2024-11-13T18:35:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741860_1036 (size=19000) 2024-11-13T18:35:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741860_1036 (size=19000) 2024-11-13T18:35:28,177 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/9790b003edd54fdb8b898dae4d2b49e1 2024-11-13T18:35:28,186 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 910931266edfbb972760e2de1949a1f7#info#compaction#76 average throughput is 40.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:35:28,187 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/e3f3b66bc7074ba7837e74253d9b9b27 is 1080, key is row0062/info:/1731522891858/Put/seqid=0 2024-11-13T18:35:28,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/9790b003edd54fdb8b898dae4d2b49e1 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/9790b003edd54fdb8b898dae4d2b49e1 2024-11-13T18:35:28,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741861_1037 (size=90764) 2024-11-13T18:35:28,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741861_1037 (size=90764) 2024-11-13T18:35:28,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/9790b003edd54fdb8b898dae4d2b49e1, entries=13, sequenceid=197, filesize=18.6 K 2024-11-13T18:35:28,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=7.36 KB/7532 for 910931266edfbb972760e2de1949a1f7 in 37ms, sequenceid=197, compaction requested=false 2024-11-13T18:35:28,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:28,203 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/e3f3b66bc7074ba7837e74253d9b9b27 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e3f3b66bc7074ba7837e74253d9b9b27 2024-11-13T18:35:28,209 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 910931266edfbb972760e2de1949a1f7/info of 910931266edfbb972760e2de1949a1f7 into e3f3b66bc7074ba7837e74253d9b9b27(size=88.6 K), total size for store is 107.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
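For reference, the store-size figure in the entry just below is consistent with the surrounding entries: the compaction rewrote the three selected files (63.2 K + 18.6 K + 16.4 K = 98.2 K of input) into the single 88.6 K file e3f3b66bc7074ba7837e74253d9b9b27, while the 18.6 K file 9790b003edd54fdb8b898dae4d2b49e1 flushed at sequenceid=197 during the compaction was not part of the selection, so the store ends up at roughly 88.6 K + 18.6 K ≈ 107.2 K.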
2024-11-13T18:35:28,209 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:28,209 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7., storeName=910931266edfbb972760e2de1949a1f7/info, priority=13, startTime=1731522928161; duration=0sec 2024-11-13T18:35:28,209 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:28,209 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 910931266edfbb972760e2de1949a1f7:info 2024-11-13T18:35:28,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:35:28,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:29,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:29,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:35:30,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:30,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-13T18:35:30,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/6ef894fe11e94d7ea45e4ace9058cdef is 1080, key is row0154/info:/1731522928165/Put/seqid=0 2024-11-13T18:35:30,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741862_1038 (size=13594) 2024-11-13T18:35:30,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741862_1038 (size=13594) 2024-11-13T18:35:30,202 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/6ef894fe11e94d7ea45e4ace9058cdef 2024-11-13T18:35:30,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/6ef894fe11e94d7ea45e4ace9058cdef as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6ef894fe11e94d7ea45e4ace9058cdef 2024-11-13T18:35:30,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6ef894fe11e94d7ea45e4ace9058cdef, entries=8, sequenceid=209, filesize=13.3 K 2024-11-13T18:35:30,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=14.71 KB/15064 for 910931266edfbb972760e2de1949a1f7 in 24ms, sequenceid=209, compaction requested=true 2024-11-13T18:35:30,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:30,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 910931266edfbb972760e2de1949a1f7:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:35:30,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:30,215 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:35:30,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:30,216 DEBUG 
[RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123358 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:35:30,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-13T18:35:30,216 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1541): 910931266edfbb972760e2de1949a1f7/info is initiating minor compaction (all files) 2024-11-13T18:35:30,216 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 910931266edfbb972760e2de1949a1f7/info in TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:35:30,216 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e3f3b66bc7074ba7837e74253d9b9b27, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/9790b003edd54fdb8b898dae4d2b49e1, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6ef894fe11e94d7ea45e4ace9058cdef] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp, totalSize=120.5 K 2024-11-13T18:35:30,216 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting e3f3b66bc7074ba7837e74253d9b9b27, keycount=79, bloomtype=ROW, size=88.6 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1731522891858 2024-11-13T18:35:30,217 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9790b003edd54fdb8b898dae4d2b49e1, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731522928139 2024-11-13T18:35:30,217 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6ef894fe11e94d7ea45e4ace9058cdef, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1731522928165 2024-11-13T18:35:30,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/945382dfbfaa449da9c265b57cd593e6 is 1080, key is row0162/info:/1731522930191/Put/seqid=0 2024-11-13T18:35:30,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741863_1039 (size=22238) 2024-11-13T18:35:30,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741863_1039 (size=22238) 2024-11-13T18:35:30,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=228 (bloomFilter=true), 
to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/945382dfbfaa449da9c265b57cd593e6 2024-11-13T18:35:30,230 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 910931266edfbb972760e2de1949a1f7#info#compaction#79 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:35:30,231 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/ee5a3d82428f4788aa21126b7c7aede8 is 1080, key is row0062/info:/1731522891858/Put/seqid=0 2024-11-13T18:35:30,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/945382dfbfaa449da9c265b57cd593e6 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/945382dfbfaa449da9c265b57cd593e6 2024-11-13T18:35:30,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/945382dfbfaa449da9c265b57cd593e6, entries=16, sequenceid=228, filesize=21.7 K 2024-11-13T18:35:30,239 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=12.61 KB/12912 for 910931266edfbb972760e2de1949a1f7 in 23ms, sequenceid=228, compaction requested=false 2024-11-13T18:35:30,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:30,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:30,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-13T18:35:30,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741864_1040 (size=113508) 2024-11-13T18:35:30,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741864_1040 (size=113508) 2024-11-13T18:35:30,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/6649085710bb4b51aad721fb53457e54 is 1080, key is row0178/info:/1731522930217/Put/seqid=0 2024-11-13T18:35:30,249 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/ee5a3d82428f4788aa21126b7c7aede8 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/ee5a3d82428f4788aa21126b7c7aede8 2024-11-13T18:35:30,255 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 910931266edfbb972760e2de1949a1f7/info of 910931266edfbb972760e2de1949a1f7 into ee5a3d82428f4788aa21126b7c7aede8(size=110.8 K), total size for store is 132.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T18:35:30,255 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:30,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741865_1041 (size=20078) 2024-11-13T18:35:30,255 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7., storeName=910931266edfbb972760e2de1949a1f7/info, priority=13, startTime=1731522930214; duration=0sec 2024-11-13T18:35:30,255 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:30,255 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 910931266edfbb972760e2de1949a1f7:info 2024-11-13T18:35:30,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741865_1041 (size=20078) 2024-11-13T18:35:30,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/6649085710bb4b51aad721fb53457e54 2024-11-13T18:35:30,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/6649085710bb4b51aad721fb53457e54 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6649085710bb4b51aad721fb53457e54 2024-11-13T18:35:30,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6649085710bb4b51aad721fb53457e54, entries=14, sequenceid=245, filesize=19.6 K 2024-11-13T18:35:30,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 910931266edfbb972760e2de1949a1f7 in 26ms, sequenceid=245, compaction requested=true 2024-11-13T18:35:30,266 
DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:30,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 910931266edfbb972760e2de1949a1f7:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:35:30,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:30,266 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:35:30,267 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 155824 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:35:30,267 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1541): 910931266edfbb972760e2de1949a1f7/info is initiating minor compaction (all files) 2024-11-13T18:35:30,267 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 910931266edfbb972760e2de1949a1f7/info in TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:35:30,268 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/ee5a3d82428f4788aa21126b7c7aede8, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/945382dfbfaa449da9c265b57cd593e6, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6649085710bb4b51aad721fb53457e54] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp, totalSize=152.2 K 2024-11-13T18:35:30,268 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting ee5a3d82428f4788aa21126b7c7aede8, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1731522891858 2024-11-13T18:35:30,268 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=66, reuseRatio=88.00% 2024-11-13T18:35:30,268 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 945382dfbfaa449da9c265b57cd593e6, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=228, earliestPutTs=1731522930191 2024-11-13T18:35:30,268 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-13T18:35:30,269 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] 
compactions.Compactor(225): Compacting 6649085710bb4b51aad721fb53457e54, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731522930217 2024-11-13T18:35:30,280 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 910931266edfbb972760e2de1949a1f7#info#compaction#81 average throughput is 44.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:35:30,281 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/617ac9f953754e8ba2efec23962a3bb6 is 1080, key is row0062/info:/1731522891858/Put/seqid=0 2024-11-13T18:35:30,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741866_1042 (size=146155) 2024-11-13T18:35:30,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741866_1042 (size=146155) 2024-11-13T18:35:30,293 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/617ac9f953754e8ba2efec23962a3bb6 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/617ac9f953754e8ba2efec23962a3bb6 2024-11-13T18:35:30,300 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 910931266edfbb972760e2de1949a1f7/info of 910931266edfbb972760e2de1949a1f7 into 617ac9f953754e8ba2efec23962a3bb6(size=142.7 K), total size for store is 142.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T18:35:30,300 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:30,300 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7., storeName=910931266edfbb972760e2de1949a1f7/info, priority=13, startTime=1731522930266; duration=0sec 2024-11-13T18:35:30,300 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:30,300 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 910931266edfbb972760e2de1949a1f7:info 2024-11-13T18:35:30,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:30,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:31,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:31,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:32,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:32,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T18:35:32,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/4ccadd5e71f14963a356b6beec295485 is 1080, key is row0192/info:/1731522930242/Put/seqid=0 2024-11-13T18:35:32,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741867_1043 (size=12517) 2024-11-13T18:35:32,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741867_1043 (size=12517) 2024-11-13T18:35:32,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/4ccadd5e71f14963a356b6beec295485 2024-11-13T18:35:32,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/4ccadd5e71f14963a356b6beec295485 as 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/4ccadd5e71f14963a356b6beec295485 2024-11-13T18:35:32,275 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/4ccadd5e71f14963a356b6beec295485, entries=7, sequenceid=257, filesize=12.2 K 2024-11-13T18:35:32,276 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 910931266edfbb972760e2de1949a1f7 in 23ms, sequenceid=257, compaction requested=false 2024-11-13T18:35:32,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:32,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:32,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-13T18:35:32,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d8c477c108d74c17a2289ea3daa7ba96 is 1080, key is row0199/info:/1731522932254/Put/seqid=0 2024-11-13T18:35:32,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741868_1044 (size=21171) 2024-11-13T18:35:32,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741868_1044 (size=21171) 2024-11-13T18:35:32,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d8c477c108d74c17a2289ea3daa7ba96 2024-11-13T18:35:32,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d8c477c108d74c17a2289ea3daa7ba96 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d8c477c108d74c17a2289ea3daa7ba96 2024-11-13T18:35:32,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d8c477c108d74c17a2289ea3daa7ba96, entries=15, sequenceid=275, filesize=20.7 K 2024-11-13T18:35:32,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 910931266edfbb972760e2de1949a1f7 in 22ms, sequenceid=275, compaction requested=true 2024-11-13T18:35:32,299 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:32,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 910931266edfbb972760e2de1949a1f7:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:35:32,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:32,299 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:35:32,300 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 179843 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:35:32,300 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1541): 910931266edfbb972760e2de1949a1f7/info is initiating minor compaction (all files) 2024-11-13T18:35:32,300 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 910931266edfbb972760e2de1949a1f7/info in TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:35:32,300 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/617ac9f953754e8ba2efec23962a3bb6, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/4ccadd5e71f14963a356b6beec295485, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d8c477c108d74c17a2289ea3daa7ba96] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp, totalSize=175.6 K 2024-11-13T18:35:32,301 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 617ac9f953754e8ba2efec23962a3bb6, keycount=130, bloomtype=ROW, size=142.7 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731522891858 2024-11-13T18:35:32,301 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ccadd5e71f14963a356b6beec295485, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1731522930242 2024-11-13T18:35:32,301 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting d8c477c108d74c17a2289ea3daa7ba96, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731522932254 2024-11-13T18:35:32,312 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 910931266edfbb972760e2de1949a1f7#info#compaction#84 average throughput is 51.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:35:32,313 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/93679be9814c4c7696b7dca4ea490c1c is 1080, key is row0062/info:/1731522891858/Put/seqid=0 2024-11-13T18:35:32,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741869_1045 (size=169993) 2024-11-13T18:35:32,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741869_1045 (size=169993) 2024-11-13T18:35:32,322 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/93679be9814c4c7696b7dca4ea490c1c as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/93679be9814c4c7696b7dca4ea490c1c 2024-11-13T18:35:32,328 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 910931266edfbb972760e2de1949a1f7/info of 910931266edfbb972760e2de1949a1f7 into 93679be9814c4c7696b7dca4ea490c1c(size=166.0 K), total size for store is 166.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T18:35:32,328 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:32,328 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7., storeName=910931266edfbb972760e2de1949a1f7/info, priority=13, startTime=1731522932299; duration=0sec 2024-11-13T18:35:32,328 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:32,328 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 910931266edfbb972760e2de1949a1f7:info 2024-11-13T18:35:32,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:32,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:33,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:33,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:34,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:34,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-13T18:35:34,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d5e2c28f671d4594a1266b603c4c80a9 is 1080, key is row0214/info:/1731522932278/Put/seqid=0 2024-11-13T18:35:34,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741870_1046 (size=17918) 2024-11-13T18:35:34,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741870_1046 (size=17918) 2024-11-13T18:35:34,308 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d5e2c28f671d4594a1266b603c4c80a9 2024-11-13T18:35:34,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d5e2c28f671d4594a1266b603c4c80a9 as 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d5e2c28f671d4594a1266b603c4c80a9 2024-11-13T18:35:34,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d5e2c28f671d4594a1266b603c4c80a9, entries=12, sequenceid=291, filesize=17.5 K 2024-11-13T18:35:34,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 910931266edfbb972760e2de1949a1f7 in 22ms, sequenceid=291, compaction requested=false 2024-11-13T18:35:34,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:34,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:34,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-13T18:35:34,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/e965f3bec0f246a6ba3c055015c01def is 1080, key is row0226/info:/1731522934298/Put/seqid=0 2024-11-13T18:35:34,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741871_1047 (size=16839) 2024-11-13T18:35:34,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741871_1047 (size=16839) 2024-11-13T18:35:34,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/e965f3bec0f246a6ba3c055015c01def 2024-11-13T18:35:34,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/e965f3bec0f246a6ba3c055015c01def as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e965f3bec0f246a6ba3c055015c01def 2024-11-13T18:35:34,343 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e965f3bec0f246a6ba3c055015c01def, entries=11, sequenceid=305, filesize=16.4 K 2024-11-13T18:35:34,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 910931266edfbb972760e2de1949a1f7 in 23ms, sequenceid=305, compaction requested=true 2024-11-13T18:35:34,344 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:34,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 910931266edfbb972760e2de1949a1f7:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T18:35:34,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:34,345 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T18:35:34,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41111 {}] regionserver.HRegion(8855): Flush requested on 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:34,345 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-13T18:35:34,346 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 204750 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T18:35:34,346 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1541): 910931266edfbb972760e2de1949a1f7/info is initiating minor compaction (all files) 2024-11-13T18:35:34,346 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 910931266edfbb972760e2de1949a1f7/info in TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:35:34,346 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/93679be9814c4c7696b7dca4ea490c1c, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d5e2c28f671d4594a1266b603c4c80a9, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e965f3bec0f246a6ba3c055015c01def] into tmpdir=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp, totalSize=200.0 K 2024-11-13T18:35:34,346 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting 93679be9814c4c7696b7dca4ea490c1c, keycount=152, bloomtype=ROW, size=166.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731522891858 2024-11-13T18:35:34,347 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5e2c28f671d4594a1266b603c4c80a9, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1731522932278 2024-11-13T18:35:34,347 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] compactions.Compactor(225): Compacting e965f3bec0f246a6ba3c055015c01def, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1731522934298 2024-11-13T18:35:34,350 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/afb8525169c04a5491c21b85822c4080 is 1080, key is row0237/info:/1731522934322/Put/seqid=0 2024-11-13T18:35:34,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741872_1048 (size=20092) 2024-11-13T18:35:34,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741872_1048 (size=20092) 2024-11-13T18:35:34,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/afb8525169c04a5491c21b85822c4080 2024-11-13T18:35:34,364 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 910931266edfbb972760e2de1949a1f7#info#compaction#88 average throughput is 59.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T18:35:34,365 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/c4f3c2bcc823425eaadbf2241022a2b6 is 1080, key is row0062/info:/1731522891858/Put/seqid=0 2024-11-13T18:35:34,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/afb8525169c04a5491c21b85822c4080 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/afb8525169c04a5491c21b85822c4080 2024-11-13T18:35:34,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/afb8525169c04a5491c21b85822c4080, entries=14, sequenceid=322, filesize=19.6 K 2024-11-13T18:35:34,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=6.30 KB/6456 for 910931266edfbb972760e2de1949a1f7 in 29ms, sequenceid=322, compaction requested=false 2024-11-13T18:35:34,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:34,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741873_1049 (size=194920) 2024-11-13T18:35:34,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741873_1049 (size=194920) 2024-11-13T18:35:34,388 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/c4f3c2bcc823425eaadbf2241022a2b6 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/c4f3c2bcc823425eaadbf2241022a2b6 2024-11-13T18:35:34,394 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 910931266edfbb972760e2de1949a1f7/info of 910931266edfbb972760e2de1949a1f7 into c4f3c2bcc823425eaadbf2241022a2b6(size=190.4 K), total size for store is 210.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T18:35:34,394 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:34,394 INFO [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7., storeName=910931266edfbb972760e2de1949a1f7/info, priority=13, startTime=1731522934344; duration=0sec 2024-11-13T18:35:34,394 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T18:35:34,394 DEBUG [RS:0;39e84130bbc9:41111-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 910931266edfbb972760e2de1949a1f7:info 2024-11-13T18:35:34,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:34,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:35,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:35,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:36,356 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-13T18:35:36,357 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C41111%2C1731522867902.1731522936356 2024-11-13T18:35:36,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,374 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,374 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,374 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,374 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,374 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902/39e84130bbc9%2C41111%2C1731522867902.1731522868880 with entries=310, filesize=307.72 KB; new WAL /user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902/39e84130bbc9%2C41111%2C1731522867902.1731522936356 2024-11-13T18:35:36,375 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39469:39469),(127.0.0.1/127.0.0.1:43903:43903)] 2024-11-13T18:35:36,375 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902/39e84130bbc9%2C41111%2C1731522867902.1731522868880 is not closed yet, will try archiving it next time 2024-11-13T18:35:36,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741833_1009 (size=315114) 2024-11-13T18:35:36,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741833_1009 (size=315114) 2024-11-13T18:35:36,383 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-13T18:35:36,387 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/info/aaf41021436c418aa9663d705111bcf7 is 193, key is TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7./info:regioninfo/1731522894831/Put/seqid=0 2024-11-13T18:35:36,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741875_1051 (size=6223) 2024-11-13T18:35:36,391 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/info/aaf41021436c418aa9663d705111bcf7 2024-11-13T18:35:36,391 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741875_1051 (size=6223) 2024-11-13T18:35:36,397 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/.tmp/info/aaf41021436c418aa9663d705111bcf7 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/info/aaf41021436c418aa9663d705111bcf7 2024-11-13T18:35:36,402 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/info/aaf41021436c418aa9663d705111bcf7, entries=5, sequenceid=21, filesize=6.1 K 2024-11-13T18:35:36,403 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-13T18:35:36,403 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T18:35:36,403 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 36e76e28ea43bc826997975f0d3d11c5: 2024-11-13T18:35:36,403 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 910931266edfbb972760e2de1949a1f7 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-13T18:35:36,406 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d165ad20cc1a4061b314ad01855686e7 is 1080, key is row0251/info:/1731522934346/Put/seqid=0 2024-11-13T18:35:36,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741876_1052 (size=11436) 2024-11-13T18:35:36,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741876_1052 (size=11436) 2024-11-13T18:35:36,411 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d165ad20cc1a4061b314ad01855686e7 2024-11-13T18:35:36,415 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/.tmp/info/d165ad20cc1a4061b314ad01855686e7 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d165ad20cc1a4061b314ad01855686e7 2024-11-13T18:35:36,420 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d165ad20cc1a4061b314ad01855686e7, entries=6, sequenceid=332, filesize=11.2 K 2024-11-13T18:35:36,422 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6456, 
heapSize ~6.98 KB/7152, currentSize=0 B/0 for 910931266edfbb972760e2de1949a1f7 in 19ms, sequenceid=332, compaction requested=true 2024-11-13T18:35:36,422 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 910931266edfbb972760e2de1949a1f7: 2024-11-13T18:35:36,422 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C41111%2C1731522867902.1731522936422 2024-11-13T18:35:36,429 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,429 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,430 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,430 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,430 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,430 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902/39e84130bbc9%2C41111%2C1731522867902.1731522936356 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902/39e84130bbc9%2C41111%2C1731522867902.1731522936422 2024-11-13T18:35:36,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741874_1050 (size=731) 2024-11-13T18:35:36,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741874_1050 (size=731) 2024-11-13T18:35:36,433 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902/39e84130bbc9%2C41111%2C1731522867902.1731522868880 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/oldWALs/39e84130bbc9%2C41111%2C1731522867902.1731522868880 2024-11-13T18:35:36,434 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43903:43903),(127.0.0.1/127.0.0.1:39469:39469)] 2024-11-13T18:35:36,434 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/WALs/39e84130bbc9,41111,1731522867902/39e84130bbc9%2C41111%2C1731522867902.1731522936356 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/oldWALs/39e84130bbc9%2C41111%2C1731522867902.1731522936356 2024-11-13T18:35:36,434 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T18:35:36,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T18:35:36,434 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
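The entries above record a WAL roll: the test requests a new writer, the sync runners on the old writer are interrupted, and the fully synced previous WAL files are moved into the oldWALs directory by the WAL-Archive thread. As a rough sketch of that final archive step only (not the test's or HBase's actual implementation; the filesystem handle and both paths below are hypothetical), the move amounts to a rename under the archive directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WalArchiveSketch {
  // Illustrative only: move a closed WAL file into the oldWALs directory,
  // mirroring the "Archiving ... to .../oldWALs/..." lines in this log.
  static void archiveWal(FileSystem fs, Path walFile, Path oldWALsDir) throws java.io.IOException {
    Path target = new Path(oldWALsDir, walFile.getName()); // keep the original file name
    if (!fs.exists(oldWALsDir)) {
      fs.mkdirs(oldWALsDir); // make sure the archive directory exists
    }
    if (!fs.rename(walFile, target)) { // on HDFS this is a metadata-only move
      throw new java.io.IOException("Failed to archive " + walFile + " to " + target);
    }
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration()); // hypothetical: default filesystem from config
    archiveWal(fs,
        new Path("/user/jenkins/WALs/example-server/example.1731522868880"), // hypothetical WAL path
        new Path("/user/jenkins/oldWALs")); // hypothetical archive directory
  }
}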
2024-11-13T18:35:36,435 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:35:36,435 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:36,435 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:36,435 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T18:35:36,435 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=710309200, stopped=false 2024-11-13T18:35:36,435 
INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39e84130bbc9,43995,1731522867811 2024-11-13T18:35:36,435 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T18:35:36,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:35:36,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:35:36,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:36,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:36,437 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:35:36,437 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T18:35:36,437 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:35:36,437 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:36,437 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:35:36,437 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39e84130bbc9,41111,1731522867902' ***** 2024-11-13T18:35:36,437 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T18:35:36,438 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T18:35:36,438 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(3091): Received CLOSE for 36e76e28ea43bc826997975f0d3d11c5 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(3091): Received CLOSE for 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(959): stopping server 39e84130bbc9,41111,1731522867902 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39e84130bbc9:41111. 
2024-11-13T18:35:36,438 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 36e76e28ea43bc826997975f0d3d11c5, disabling compactions & flushes 2024-11-13T18:35:36,438 DEBUG [RS:0;39e84130bbc9:41111 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:35:36,438 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 2024-11-13T18:35:36,438 DEBUG [RS:0;39e84130bbc9:41111 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:36,438 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 2024-11-13T18:35:36,438 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. after waiting 0 ms 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T18:35:36,438 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T18:35:36,438 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T18:35:36,439 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T18:35:36,439 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-13T18:35:36,439 DEBUG [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 36e76e28ea43bc826997975f0d3d11c5=TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5., 910931266edfbb972760e2de1949a1f7=TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.} 2024-11-13T18:35:36,439 DEBUG [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 36e76e28ea43bc826997975f0d3d11c5, 910931266edfbb972760e2de1949a1f7 2024-11-13T18:35:36,439 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:35:36,439 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:35:36,439 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63->hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c9641bea28174078830054e84a2bcd6e-bottom] to archive 2024-11-13T18:35:36,439 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:35:36,439 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:35:36,439 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:35:36,440 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T18:35:36,442 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:35:36,442 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=39e84130bbc9:43995 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-13T18:35:36,442 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-13T18:35:36,444 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-13T18:35:36,444 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:35:36,444 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:35:36,445 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522936439Running coprocessor pre-close hooks at 1731522936439Disabling compacts and flushes for region at 1731522936439Disabling writes for close at 1731522936439Writing region close event to WAL at 1731522936440 (+1 ms)Running coprocessor post-close hooks at 1731522936444 (+4 ms)Closed at 1731522936444 2024-11-13T18:35:36,445 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T18:35:36,445 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/36e76e28ea43bc826997975f0d3d11c5/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=121 2024-11-13T18:35:36,446 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 2024-11-13T18:35:36,446 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 36e76e28ea43bc826997975f0d3d11c5: Waiting for close lock at 1731522936438Running coprocessor pre-close hooks at 1731522936438Disabling compacts and flushes for region at 1731522936438Disabling writes for close at 1731522936438Writing region close event to WAL at 1731522936442 (+4 ms)Running coprocessor post-close hooks at 1731522936446 (+4 ms)Closed at 1731522936446 2024-11-13T18:35:36,446 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731522894002.36e76e28ea43bc826997975f0d3d11c5. 
2024-11-13T18:35:36,446 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 910931266edfbb972760e2de1949a1f7, disabling compactions & flushes 2024-11-13T18:35:36,446 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:35:36,446 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:35:36,446 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. after waiting 0 ms 2024-11-13T18:35:36,446 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:35:36,447 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63->hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/4d4bed97268a796b28eb74cdec559a63/info/c9641bea28174078830054e84a2bcd6e-top, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-1f5b4e2088054d599bcb66b966cc4ab0, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d25438f1914c4278af2fcc7b6b03d655, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-be46a4724ee74b33b396eaade6ecda14, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/8ccf3c4fe9564f608897118e379e7fa4, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/11cbc91dbbf14d21b6a6fcece9056d89, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d87d1bf5bc7741dda02f38af0e83c037, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/90584e3f19da4813a41d15a73723ba1f, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e3f3b66bc7074ba7837e74253d9b9b27, 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/dcb2ef422f884829b1098a2511fd02da, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/9790b003edd54fdb8b898dae4d2b49e1, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/ee5a3d82428f4788aa21126b7c7aede8, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6ef894fe11e94d7ea45e4ace9058cdef, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/945382dfbfaa449da9c265b57cd593e6, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/617ac9f953754e8ba2efec23962a3bb6, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6649085710bb4b51aad721fb53457e54, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/4ccadd5e71f14963a356b6beec295485, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/93679be9814c4c7696b7dca4ea490c1c, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d8c477c108d74c17a2289ea3daa7ba96, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d5e2c28f671d4594a1266b603c4c80a9, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e965f3bec0f246a6ba3c055015c01def] to archive 2024-11-13T18:35:36,448 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
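The paths listed above show the pattern the store closer follows when a region shuts down: compacted HFiles are re-rooted from data/default/&lt;table&gt;/&lt;region&gt;/&lt;family&gt;/ to the same relative location under archive/. A minimal sketch of that path re-rooting and move, assuming a Hadoop FileSystem handle and hypothetical rootDir/storeFile values (this is not the HFileArchiver code itself), is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HFileArchiveSketch {
  // Illustrative only: compute the archive location by re-rooting the store file
  // path under <rootDir>/archive, as the data/... -> archive/data/... lines show.
  static Path archiveLocation(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();
    String file = storeFile.toUri().getPath();
    String relative = file.substring(root.length() + 1); // e.g. data/default/<table>/<region>/info/<hfile>
    return new Path(new Path(rootDir, "archive"), relative);
  }

  static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws java.io.IOException {
    Path target = archiveLocation(rootDir, storeFile);
    fs.mkdirs(target.getParent()); // create archive/<relative dir> if it is missing
    if (!fs.rename(storeFile, target)) { // move while keeping the file name
      throw new java.io.IOException("Could not archive " + storeFile + " to " + target);
    }
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration()); // hypothetical: default filesystem from config
    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // hypothetical root directory
    archiveStoreFile(fs, rootDir,
        new Path(rootDir, "data/default/ExampleTable/region1/info/examplefile")); // hypothetical HFile
  }
}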
2024-11-13T18:35:36,449 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/c9641bea28174078830054e84a2bcd6e.4d4bed97268a796b28eb74cdec559a63 2024-11-13T18:35:36,450 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-1f5b4e2088054d599bcb66b966cc4ab0 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-1f5b4e2088054d599bcb66b966cc4ab0 2024-11-13T18:35:36,451 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d25438f1914c4278af2fcc7b6b03d655 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d25438f1914c4278af2fcc7b6b03d655 2024-11-13T18:35:36,452 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-be46a4724ee74b33b396eaade6ecda14 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/TestLogRolling-testLogRolling=4d4bed97268a796b28eb74cdec559a63-be46a4724ee74b33b396eaade6ecda14 2024-11-13T18:35:36,453 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/8ccf3c4fe9564f608897118e379e7fa4 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/8ccf3c4fe9564f608897118e379e7fa4 2024-11-13T18:35:36,454 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/11cbc91dbbf14d21b6a6fcece9056d89 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/11cbc91dbbf14d21b6a6fcece9056d89 2024-11-13T18:35:36,455 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d87d1bf5bc7741dda02f38af0e83c037 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d87d1bf5bc7741dda02f38af0e83c037 2024-11-13T18:35:36,456 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/90584e3f19da4813a41d15a73723ba1f to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/90584e3f19da4813a41d15a73723ba1f 2024-11-13T18:35:36,457 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e3f3b66bc7074ba7837e74253d9b9b27 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e3f3b66bc7074ba7837e74253d9b9b27 2024-11-13T18:35:36,458 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/dcb2ef422f884829b1098a2511fd02da to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/dcb2ef422f884829b1098a2511fd02da 2024-11-13T18:35:36,460 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/9790b003edd54fdb8b898dae4d2b49e1 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/9790b003edd54fdb8b898dae4d2b49e1 2024-11-13T18:35:36,461 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/ee5a3d82428f4788aa21126b7c7aede8 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/ee5a3d82428f4788aa21126b7c7aede8 2024-11-13T18:35:36,462 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6ef894fe11e94d7ea45e4ace9058cdef to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6ef894fe11e94d7ea45e4ace9058cdef 2024-11-13T18:35:36,463 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/945382dfbfaa449da9c265b57cd593e6 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/945382dfbfaa449da9c265b57cd593e6 2024-11-13T18:35:36,464 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/617ac9f953754e8ba2efec23962a3bb6 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/617ac9f953754e8ba2efec23962a3bb6 2024-11-13T18:35:36,465 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6649085710bb4b51aad721fb53457e54 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/6649085710bb4b51aad721fb53457e54 2024-11-13T18:35:36,467 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/4ccadd5e71f14963a356b6beec295485 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/4ccadd5e71f14963a356b6beec295485 2024-11-13T18:35:36,468 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/93679be9814c4c7696b7dca4ea490c1c to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/93679be9814c4c7696b7dca4ea490c1c 2024-11-13T18:35:36,469 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d8c477c108d74c17a2289ea3daa7ba96 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d8c477c108d74c17a2289ea3daa7ba96 2024-11-13T18:35:36,470 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d5e2c28f671d4594a1266b603c4c80a9 to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/d5e2c28f671d4594a1266b603c4c80a9 2024-11-13T18:35:36,471 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e965f3bec0f246a6ba3c055015c01def to hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/archive/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/info/e965f3bec0f246a6ba3c055015c01def 2024-11-13T18:35:36,471 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d25438f1914c4278af2fcc7b6b03d655=40830, 8ccf3c4fe9564f608897118e379e7fa4=12513, 11cbc91dbbf14d21b6a6fcece9056d89=64713, d87d1bf5bc7741dda02f38af0e83c037=21156, 90584e3f19da4813a41d15a73723ba1f=19000, e3f3b66bc7074ba7837e74253d9b9b27=90764, dcb2ef422f884829b1098a2511fd02da=16828, 9790b003edd54fdb8b898dae4d2b49e1=19000, ee5a3d82428f4788aa21126b7c7aede8=113508, 6ef894fe11e94d7ea45e4ace9058cdef=13594, 945382dfbfaa449da9c265b57cd593e6=22238, 617ac9f953754e8ba2efec23962a3bb6=146155, 6649085710bb4b51aad721fb53457e54=20078, 4ccadd5e71f14963a356b6beec295485=12517, 93679be9814c4c7696b7dca4ea490c1c=169993, d8c477c108d74c17a2289ea3daa7ba96=21171, d5e2c28f671d4594a1266b603c4c80a9=17918, e965f3bec0f246a6ba3c055015c01def=16839] 2024-11-13T18:35:36,475 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/data/default/TestLogRolling-testLogRolling/910931266edfbb972760e2de1949a1f7/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=121 2024-11-13T18:35:36,476 INFO [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:35:36,476 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 910931266edfbb972760e2de1949a1f7: Waiting for close lock at 1731522936446Running coprocessor pre-close hooks at 1731522936446Disabling compacts and flushes for region at 1731522936446Disabling writes for close at 1731522936446Writing region close event to WAL at 1731522936472 (+26 ms)Running coprocessor post-close hooks at 1731522936475 (+3 ms)Closed at 1731522936476 (+1 ms) 2024-11-13T18:35:36,476 DEBUG [RS_CLOSE_REGION-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731522894002.910931266edfbb972760e2de1949a1f7. 2024-11-13T18:35:36,639 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(976): stopping server 39e84130bbc9,41111,1731522867902; all regions closed. 2024-11-13T18:35:36,640 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,640 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,640 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,640 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,640 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741834_1010 (size=8107) 2024-11-13T18:35:36,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741834_1010 (size=8107) 2024-11-13T18:35:36,645 DEBUG [RS:0;39e84130bbc9:41111 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/oldWALs 2024-11-13T18:35:36,645 INFO [RS:0;39e84130bbc9:41111 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C41111%2C1731522867902.meta:.meta(num 1731522869297) 2024-11-13T18:35:36,646 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,646 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,646 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,646 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,646 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741877_1053 (size=778) 2024-11-13T18:35:36,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741877_1053 (size=778) 2024-11-13T18:35:36,650 DEBUG [RS:0;39e84130bbc9:41111 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/oldWALs 2024-11-13T18:35:36,650 INFO [RS:0;39e84130bbc9:41111 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 
39e84130bbc9%2C41111%2C1731522867902:(num 1731522936422) 2024-11-13T18:35:36,650 DEBUG [RS:0;39e84130bbc9:41111 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:36,650 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:35:36,650 INFO [RS:0;39e84130bbc9:41111 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:35:36,650 INFO [RS:0;39e84130bbc9:41111 {}] hbase.ChoreService(370): Chore service for: regionserver/39e84130bbc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-13T18:35:36,650 INFO [RS:0;39e84130bbc9:41111 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:35:36,650 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T18:35:36,651 INFO [RS:0;39e84130bbc9:41111 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41111 2024-11-13T18:35:36,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:35:36,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39e84130bbc9,41111,1731522867902 2024-11-13T18:35:36,653 INFO [RS:0;39e84130bbc9:41111 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:35:36,654 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39e84130bbc9,41111,1731522867902] 2024-11-13T18:35:36,655 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39e84130bbc9,41111,1731522867902 already deleted, retry=false 2024-11-13T18:35:36,655 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39e84130bbc9,41111,1731522867902 expired; onlineServers=0 2024-11-13T18:35:36,655 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39e84130bbc9,43995,1731522867811' ***** 2024-11-13T18:35:36,655 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T18:35:36,655 INFO [M:0;39e84130bbc9:43995 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:35:36,656 INFO [M:0;39e84130bbc9:43995 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:35:36,656 DEBUG [M:0;39e84130bbc9:43995 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T18:35:36,656 DEBUG [M:0;39e84130bbc9:43995 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T18:35:36,656 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T18:35:36,656 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522868616 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522868616,5,FailOnTimeoutGroup] 2024-11-13T18:35:36,656 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522868616 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522868616,5,FailOnTimeoutGroup] 2024-11-13T18:35:36,656 INFO [M:0;39e84130bbc9:43995 {}] hbase.ChoreService(370): Chore service for: master/39e84130bbc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T18:35:36,656 INFO [M:0;39e84130bbc9:43995 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:35:36,656 DEBUG [M:0;39e84130bbc9:43995 {}] master.HMaster(1795): Stopping service threads 2024-11-13T18:35:36,656 INFO [M:0;39e84130bbc9:43995 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T18:35:36,656 INFO [M:0;39e84130bbc9:43995 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:35:36,656 INFO [M:0;39e84130bbc9:43995 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T18:35:36,656 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T18:35:36,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T18:35:36,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:36,657 DEBUG [M:0;39e84130bbc9:43995 {}] zookeeper.ZKUtil(347): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T18:35:36,658 WARN [M:0;39e84130bbc9:43995 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T18:35:36,658 INFO [M:0;39e84130bbc9:43995 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/.lastflushedseqids 2024-11-13T18:35:36,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741878_1054 (size=228) 2024-11-13T18:35:36,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741878_1054 (size=228) 2024-11-13T18:35:36,664 INFO [M:0;39e84130bbc9:43995 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T18:35:36,665 INFO [M:0;39e84130bbc9:43995 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T18:35:36,665 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:35:36,665 INFO [M:0;39e84130bbc9:43995 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:36,665 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:36,665 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:35:36,665 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:36,665 INFO [M:0;39e84130bbc9:43995 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-13T18:35:36,688 DEBUG [M:0;39e84130bbc9:43995 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd41643a53584ac4b9c71851b1f80a6c is 82, key is hbase:meta,,1/info:regioninfo/1731522869390/Put/seqid=0 2024-11-13T18:35:36,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741879_1055 (size=5672) 2024-11-13T18:35:36,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741879_1055 (size=5672) 2024-11-13T18:35:36,693 INFO [M:0;39e84130bbc9:43995 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd41643a53584ac4b9c71851b1f80a6c 2024-11-13T18:35:36,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:36,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T18:35:36,720 DEBUG [M:0;39e84130bbc9:43995 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ed028ef380614a7eb82205a222ca356e is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731522869984/Put/seqid=0 2024-11-13T18:35:36,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741880_1056 (size=7090) 2024-11-13T18:35:36,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741880_1056 (size=7090) 2024-11-13T18:35:36,729 INFO [M:0;39e84130bbc9:43995 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ed028ef380614a7eb82205a222ca356e 2024-11-13T18:35:36,733 INFO [M:0;39e84130bbc9:43995 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ed028ef380614a7eb82205a222ca356e 2024-11-13T18:35:36,737 INFO [regionserver/39e84130bbc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:35:36,748 DEBUG [M:0;39e84130bbc9:43995 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87650fc549484307bdf8378e9d33d73b is 69, key is 39e84130bbc9,41111,1731522867902/rs:state/1731522868706/Put/seqid=0 2024-11-13T18:35:36,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741881_1057 (size=5156) 2024-11-13T18:35:36,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741881_1057 (size=5156) 2024-11-13T18:35:36,753 INFO [M:0;39e84130bbc9:43995 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87650fc549484307bdf8378e9d33d73b 2024-11-13T18:35:36,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:35:36,754 INFO [RS:0;39e84130bbc9:41111 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:35:36,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41111-0x100ed6126fc0001, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:35:36,754 INFO [RS:0;39e84130bbc9:41111 {}] regionserver.HRegionServer(1031): Exiting; stopping=39e84130bbc9,41111,1731522867902; zookeeper connection closed. 
2024-11-13T18:35:36,755 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3c4fdf17 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3c4fdf17 2024-11-13T18:35:36,755 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T18:35:36,780 DEBUG [M:0;39e84130bbc9:43995 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/40b68c09b2cd49ff9288810c2e1f6260 is 52, key is load_balancer_on/state:d/1731522869484/Put/seqid=0 2024-11-13T18:35:36,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741882_1058 (size=5056) 2024-11-13T18:35:36,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741882_1058 (size=5056) 2024-11-13T18:35:36,786 INFO [M:0;39e84130bbc9:43995 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/40b68c09b2cd49ff9288810c2e1f6260 2024-11-13T18:35:36,791 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd41643a53584ac4b9c71851b1f80a6c as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cd41643a53584ac4b9c71851b1f80a6c 2024-11-13T18:35:36,796 INFO [M:0;39e84130bbc9:43995 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cd41643a53584ac4b9c71851b1f80a6c, entries=8, sequenceid=125, filesize=5.5 K 2024-11-13T18:35:36,796 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ed028ef380614a7eb82205a222ca356e as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ed028ef380614a7eb82205a222ca356e 2024-11-13T18:35:36,801 INFO [M:0;39e84130bbc9:43995 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ed028ef380614a7eb82205a222ca356e 2024-11-13T18:35:36,801 INFO [M:0;39e84130bbc9:43995 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ed028ef380614a7eb82205a222ca356e, entries=13, sequenceid=125, filesize=6.9 K 2024-11-13T18:35:36,802 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87650fc549484307bdf8378e9d33d73b as 
hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87650fc549484307bdf8378e9d33d73b 2024-11-13T18:35:36,805 INFO [M:0;39e84130bbc9:43995 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87650fc549484307bdf8378e9d33d73b, entries=1, sequenceid=125, filesize=5.0 K 2024-11-13T18:35:36,806 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/40b68c09b2cd49ff9288810c2e1f6260 as hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/40b68c09b2cd49ff9288810c2e1f6260 2024-11-13T18:35:36,811 INFO [M:0;39e84130bbc9:43995 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41889/user/jenkins/test-data/840bffff-d37b-9510-5dee-87e55aeb2d59/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/40b68c09b2cd49ff9288810c2e1f6260, entries=1, sequenceid=125, filesize=4.9 K 2024-11-13T18:35:36,812 INFO [M:0;39e84130bbc9:43995 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=125, compaction requested=false 2024-11-13T18:35:36,813 INFO [M:0;39e84130bbc9:43995 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:36,814 DEBUG [M:0;39e84130bbc9:43995 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522936665Disabling compacts and flushes for region at 1731522936665Disabling writes for close at 1731522936665Obtaining lock to block concurrent updates at 1731522936665Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731522936665Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731522936665Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731522936666 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731522936666Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731522936687 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731522936687Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731522936699 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731522936720 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731522936720Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731522936734 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731522936747 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731522936747Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731522936759 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731522936779 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731522936779Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50515379: reopening flushed file at 1731522936790 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20d2ba3f: reopening flushed file at 1731522936796 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5db309ee: reopening flushed file at 1731522936801 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a6dd09: reopening flushed file at 1731522936806 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=125, compaction requested=false at 1731522936812 (+6 ms)Writing region close event to WAL at 1731522936813 (+1 ms)Closed at 1731522936813 2024-11-13T18:35:36,814 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,814 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,814 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,814 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,815 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46371 is added to blk_1073741830_1006 (size=61320) 2024-11-13T18:35:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35915 is added to blk_1073741830_1006 (size=61320) 2024-11-13T18:35:36,818 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T18:35:36,818 INFO [M:0;39e84130bbc9:43995 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-13T18:35:36,818 INFO [M:0;39e84130bbc9:43995 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43995 2024-11-13T18:35:36,819 INFO [M:0;39e84130bbc9:43995 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:35:36,921 INFO [M:0;39e84130bbc9:43995 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:35:36,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:35:36,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43995-0x100ed6126fc0000, quorum=127.0.0.1:61079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:35:36,923 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@719b1e37{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:35:36,923 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34accf12{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:35:36,924 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:35:36,924 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3868302b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:35:36,924 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@412b5320{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.log.dir/,STOPPED} 2024-11-13T18:35:36,926 WARN [BP-795142871-172.17.0.3-1731522866841 heartbeating to localhost/127.0.0.1:41889 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:35:36,926 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T18:35:36,926 WARN [BP-795142871-172.17.0.3-1731522866841 heartbeating to localhost/127.0.0.1:41889 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-795142871-172.17.0.3-1731522866841 (Datanode Uuid 3cf52934-28d0-4c6a-b632-d8a27dce4551) service to localhost/127.0.0.1:41889 2024-11-13T18:35:36,926 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:35:36,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,926 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/data/data3/current/BP-795142871-172.17.0.3-1731522866841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:35:36,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/data/data4/current/BP-795142871-172.17.0.3-1731522866841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:35:36,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:36,973 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:35:36,978 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7305dd28{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:35:36,979 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f8818bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:35:36,979 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:35:36,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@516e643a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:35:36,979 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75d9b484{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.log.dir/,STOPPED} 2024-11-13T18:35:36,980 WARN [BP-795142871-172.17.0.3-1731522866841 heartbeating to localhost/127.0.0.1:41889 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:35:36,980 WARN [BP-795142871-172.17.0.3-1731522866841 heartbeating to localhost/127.0.0.1:41889 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-795142871-172.17.0.3-1731522866841 (Datanode Uuid 34ae6a58-a938-405e-892f-c45c46439678) service to localhost/127.0.0.1:41889 2024-11-13T18:35:36,980 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:35:36,980 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:35:36,981 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/data/data1/current/BP-795142871-172.17.0.3-1731522866841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:35:36,981 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/cluster_c4e11767-c141-5b9f-e31e-a998e68e2837/data/data2/current/BP-795142871-172.17.0.3-1731522866841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:35:36,981 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:35:36,987 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e4041f6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:35:36,988 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@c5b7d7a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:35:36,988 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:35:36,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51e0317a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:35:36,988 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@540b6b2e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.log.dir/,STOPPED} 2024-11-13T18:35:36,995 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T18:35:37,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T18:35:37,042 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=226 (was 206) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41889 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41889 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41889 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41889 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41889 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41889 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41889 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:41889 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=382 (was 419), ProcessCount=11 (was 11), AvailableMemoryMB=1073 (was 2986) 2024-11-13T18:35:37,053 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=226, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=382, ProcessCount=11, AvailableMemoryMB=1073 2024-11-13T18:35:37,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T18:35:37,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.log.dir so I do NOT create it in target/test-data/d9ab4104-e784-9757-087d-f56c137ad446 2024-11-13T18:35:37,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/172312b3-0512-7530-eb54-b00759a8b0b7/hadoop.tmp.dir so I do NOT create it in target/test-data/d9ab4104-e784-9757-087d-f56c137ad446 2024-11-13T18:35:37,053 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb, deleteOnExit=true 2024-11-13T18:35:37,053 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T18:35:37,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/test.cache.data in system properties and HBase conf 2024-11-13T18:35:37,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T18:35:37,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/hadoop.log.dir in system properties and HBase conf 2024-11-13T18:35:37,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T18:35:37,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T18:35:37,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T18:35:37,054 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T18:35:37,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:35:37,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T18:35:37,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T18:35:37,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:35:37,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T18:35:37,055 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T18:35:37,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T18:35:37,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:35:37,055 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T18:35:37,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/nfs.dump.dir in system properties and HBase conf 2024-11-13T18:35:37,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/java.io.tmpdir in system properties and HBase conf 2024-11-13T18:35:37,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T18:35:37,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T18:35:37,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T18:35:37,095 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:35:37,152 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-13T18:35:37,153 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,153 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,154 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,154 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,154 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,155 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,155 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,156 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,156 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,156 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,189 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,189 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,189 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,190 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,190 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,190 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,195 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,196 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,196 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,199 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T18:35:37,231 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:35:37,239 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:35:37,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:35:37,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:35:37,258 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:35:37,260 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:35:37,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b4c94a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:35:37,262 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3073e97e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:35:37,424 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1281d817{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/java.io.tmpdir/jetty-localhost-42797-hadoop-hdfs-3_4_1-tests_jar-_-any-18022830933961608844/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:35:37,424 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ac5026a{HTTP/1.1, (http/1.1)}{localhost:42797} 2024-11-13T18:35:37,424 INFO [Time-limited test {}] server.Server(415): Started @313475ms 2024-11-13T18:35:37,437 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T18:35:37,522 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:35:37,525 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:35:37,526 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:35:37,526 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:35:37,526 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:35:37,526 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@333ec6ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:35:37,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18478920{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:35:37,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3910812a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/java.io.tmpdir/jetty-localhost-42351-hadoop-hdfs-3_4_1-tests_jar-_-any-7782624760988809686/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:35:37,652 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31d39f87{HTTP/1.1, (http/1.1)}{localhost:42351} 2024-11-13T18:35:37,652 INFO [Time-limited test {}] server.Server(415): Started @313703ms 2024-11-13T18:35:37,653 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T18:35:37,699 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T18:35:37,701 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T18:35:37,702 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T18:35:37,702 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T18:35:37,702 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T18:35:37,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f9a8217{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/hadoop.log.dir/,AVAILABLE} 2024-11-13T18:35:37,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b13a29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T18:35:37,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:37,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:37,743 WARN [Thread-2470 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/data/data2/current/BP-1158409345-172.17.0.3-1731522937102/current, will proceed with Du for space computation calculation, 2024-11-13T18:35:37,743 WARN [Thread-2469 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/data/data1/current/BP-1158409345-172.17.0.3-1731522937102/current, will proceed with Du for space computation calculation, 2024-11-13T18:35:37,767 WARN [Thread-2448 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T18:35:37,770 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25eff2731407950 with lease ID 0xd1a4c0688a038eeb: Processing first storage report for DS-51d21ca5-7bd5-4077-8d4e-d87b16d0798e from datanode DatanodeRegistration(127.0.0.1:34951, datanodeUuid=39555c80-8c45-4429-a463-11600cae136a, infoPort=42389, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1400810328;c=1731522937102) 2024-11-13T18:35:37,770 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25eff2731407950 with lease ID 0xd1a4c0688a038eeb: from storage DS-51d21ca5-7bd5-4077-8d4e-d87b16d0798e node DatanodeRegistration(127.0.0.1:34951, datanodeUuid=39555c80-8c45-4429-a463-11600cae136a, infoPort=42389, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1400810328;c=1731522937102), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:35:37,770 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25eff2731407950 with lease ID 0xd1a4c0688a038eeb: Processing first storage report for DS-b496d3f1-131d-477b-b359-277747f5e41e from datanode DatanodeRegistration(127.0.0.1:34951, datanodeUuid=39555c80-8c45-4429-a463-11600cae136a, infoPort=42389, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1400810328;c=1731522937102) 2024-11-13T18:35:37,770 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25eff2731407950 with lease ID 0xd1a4c0688a038eeb: from storage DS-b496d3f1-131d-477b-b359-277747f5e41e node DatanodeRegistration(127.0.0.1:34951, datanodeUuid=39555c80-8c45-4429-a463-11600cae136a, infoPort=42389, infoSecurePort=0, ipcPort=44603, storageInfo=lv=-57;cid=testClusterID;nsid=1400810328;c=1731522937102), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T18:35:37,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@56063e0c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/java.io.tmpdir/jetty-localhost-40373-hadoop-hdfs-3_4_1-tests_jar-_-any-2251287646851607940/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:35:37,824 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61edd007{HTTP/1.1, (http/1.1)}{localhost:40373} 2024-11-13T18:35:37,824 INFO [Time-limited test {}] server.Server(415): Started @313875ms 2024-11-13T18:35:37,825 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T18:35:37,919 WARN [Thread-2495 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/data/data3/current/BP-1158409345-172.17.0.3-1731522937102/current, will proceed with Du for space computation calculation, 2024-11-13T18:35:37,920 WARN [Thread-2496 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/data/data4/current/BP-1158409345-172.17.0.3-1731522937102/current, will proceed with Du for space computation calculation, 2024-11-13T18:35:37,945 WARN [Thread-2484 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T18:35:37,947 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26ed1bc692165b73 with lease ID 0xd1a4c0688a038eec: Processing first storage report for DS-fee5947f-7b7d-40eb-a31b-e5419a7e4a94 from datanode DatanodeRegistration(127.0.0.1:39213, datanodeUuid=ba1858f7-7d62-4c64-a2b2-02b7210ce630, infoPort=34897, infoSecurePort=0, ipcPort=42661, storageInfo=lv=-57;cid=testClusterID;nsid=1400810328;c=1731522937102) 2024-11-13T18:35:37,947 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26ed1bc692165b73 with lease ID 0xd1a4c0688a038eec: from storage DS-fee5947f-7b7d-40eb-a31b-e5419a7e4a94 node DatanodeRegistration(127.0.0.1:39213, datanodeUuid=ba1858f7-7d62-4c64-a2b2-02b7210ce630, infoPort=34897, infoSecurePort=0, ipcPort=42661, storageInfo=lv=-57;cid=testClusterID;nsid=1400810328;c=1731522937102), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:35:37,947 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x26ed1bc692165b73 with lease ID 0xd1a4c0688a038eec: Processing first storage report for DS-f4a601ad-c620-4fe6-ba3d-c690240295ff from datanode DatanodeRegistration(127.0.0.1:39213, datanodeUuid=ba1858f7-7d62-4c64-a2b2-02b7210ce630, infoPort=34897, infoSecurePort=0, ipcPort=42661, storageInfo=lv=-57;cid=testClusterID;nsid=1400810328;c=1731522937102) 2024-11-13T18:35:37,947 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x26ed1bc692165b73 with lease ID 0xd1a4c0688a038eec: from storage DS-f4a601ad-c620-4fe6-ba3d-c690240295ff node DatanodeRegistration(127.0.0.1:39213, datanodeUuid=ba1858f7-7d62-4c64-a2b2-02b7210ce630, infoPort=34897, infoSecurePort=0, ipcPort=42661, storageInfo=lv=-57;cid=testClusterID;nsid=1400810328;c=1731522937102), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T18:35:38,048 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446 2024-11-13T18:35:38,051 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/zookeeper_0, clientPort=58922, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T18:35:38,052 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58922 2024-11-13T18:35:38,052 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:35:38,053 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:35:38,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:35:38,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741825_1001 (size=7) 2024-11-13T18:35:38,064 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5 with version=8 2024-11-13T18:35:38,064 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39907/user/jenkins/test-data/687cb194-bbba-4527-2898-3a756b75e6b5/hbase-staging 2024-11-13T18:35:38,066 INFO [Time-limited test {}] client.ConnectionUtils(128): master/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:35:38,066 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:35:38,067 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:35:38,067 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:35:38,067 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:35:38,067 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:35:38,067 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T18:35:38,067 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:35:38,068 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43285 2024-11-13T18:35:38,069 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43285 connecting to ZooKeeper ensemble=127.0.0.1:58922 2024-11-13T18:35:38,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:432850x0, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:35:38,075 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43285-0x100ed62396d0000 connected 2024-11-13T18:35:38,088 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:35:38,090 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:35:38,091 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:35:38,092 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5, hbase.cluster.distributed=false 2024-11-13T18:35:38,093 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:35:38,093 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43285 2024-11-13T18:35:38,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43285 2024-11-13T18:35:38,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43285 2024-11-13T18:35:38,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43285 2024-11-13T18:35:38,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43285 2024-11-13T18:35:38,109 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/39e84130bbc9:0 server-side Connection retries=45 2024-11-13T18:35:38,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:35:38,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T18:35:38,109 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T18:35:38,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T18:35:38,109 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T18:35:38,109 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T18:35:38,109 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T18:35:38,110 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33467 2024-11-13T18:35:38,111 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33467 connecting to ZooKeeper ensemble=127.0.0.1:58922 2024-11-13T18:35:38,111 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:35:38,113 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:35:38,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334670x0, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T18:35:38,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33467-0x100ed62396d0001 connected 2024-11-13T18:35:38,119 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:35:38,119 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T18:35:38,120 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T18:35:38,120 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T18:35:38,121 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T18:35:38,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33467 2024-11-13T18:35:38,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33467 2024-11-13T18:35:38,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33467 2024-11-13T18:35:38,123 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33467 2024-11-13T18:35:38,123 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33467 
2024-11-13T18:35:38,134 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;39e84130bbc9:43285 2024-11-13T18:35:38,134 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/39e84130bbc9,43285,1731522938066 2024-11-13T18:35:38,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:35:38,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:35:38,136 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/39e84130bbc9,43285,1731522938066 2024-11-13T18:35:38,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T18:35:38,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,138 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T18:35:38,139 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/39e84130bbc9,43285,1731522938066 from backup master directory 2024-11-13T18:35:38,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/39e84130bbc9,43285,1731522938066 2024-11-13T18:35:38,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:35:38,140 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T18:35:38,140 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=39e84130bbc9,43285,1731522938066 2024-11-13T18:35:38,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T18:35:38,144 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/hbase.id] with ID: 93271203-d977-4cb9-ad92-4b191ffa57a7 2024-11-13T18:35:38,144 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/.tmp/hbase.id 2024-11-13T18:35:38,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:35:38,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741826_1002 (size=42) 2024-11-13T18:35:38,150 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/.tmp/hbase.id]:[hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/hbase.id] 2024-11-13T18:35:38,161 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:35:38,161 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T18:35:38,163 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-13T18:35:38,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:35:38,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741827_1003 (size=196) 2024-11-13T18:35:38,171 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T18:35:38,172 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T18:35:38,172 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:35:38,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:35:38,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741828_1004 (size=1189) 2024-11-13T18:35:38,179 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store 2024-11-13T18:35:38,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:35:38,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741829_1005 (size=34) 2024-11-13T18:35:38,187 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:35:38,188 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:35:38,188 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:38,188 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:38,188 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:35:38,188 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:38,188 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:35:38,188 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522938187Disabling compacts and flushes for region at 1731522938187Disabling writes for close at 1731522938188 (+1 ms)Writing region close event to WAL at 1731522938188Closed at 1731522938188 2024-11-13T18:35:38,189 WARN [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/.initializing 2024-11-13T18:35:38,189 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/WALs/39e84130bbc9,43285,1731522938066 2024-11-13T18:35:38,191 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C43285%2C1731522938066, suffix=, logDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/WALs/39e84130bbc9,43285,1731522938066, archiveDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/oldWALs, maxLogs=10 2024-11-13T18:35:38,191 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C43285%2C1731522938066.1731522938191 2024-11-13T18:35:38,196 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/WALs/39e84130bbc9,43285,1731522938066/39e84130bbc9%2C43285%2C1731522938066.1731522938191 2024-11-13T18:35:38,196 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34897:34897),(127.0.0.1/127.0.0.1:42389:42389)] 2024-11-13T18:35:38,197 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:35:38,197 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:35:38,197 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,197 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,198 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T18:35:38,200 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:35:38,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T18:35:38,201 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:35:38,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,203 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T18:35:38,203 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,203 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:35:38,203 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T18:35:38,204 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T18:35:38,205 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,205 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,206 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,207 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,207 DEBUG [master/39e84130bbc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,207 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T18:35:38,208 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T18:35:38,210 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:35:38,210 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878983, jitterRate=0.11768564581871033}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T18:35:38,211 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731522938198Initializing all the Stores at 1731522938198Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522938198Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522938198Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522938198Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522938198Cleaning up temporary data from old regions at 1731522938207 (+9 ms)Region opened successfully at 1731522938211 (+4 ms) 2024-11-13T18:35:38,211 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T18:35:38,214 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f3d5c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:35:38,215 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T18:35:38,215 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T18:35:38,215 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T18:35:38,215 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T18:35:38,215 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T18:35:38,216 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T18:35:38,216 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T18:35:38,217 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T18:35:38,218 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T18:35:38,220 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T18:35:38,220 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T18:35:38,220 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T18:35:38,222 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T18:35:38,222 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T18:35:38,223 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T18:35:38,224 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T18:35:38,225 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T18:35:38,226 DEBUG 
[master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T18:35:38,228 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T18:35:38,229 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T18:35:38,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:35:38,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T18:35:38,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,232 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=39e84130bbc9,43285,1731522938066, sessionid=0x100ed62396d0000, setting cluster-up flag (Was=false) 2024-11-13T18:35:38,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,240 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T18:35:38,241 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,43285,1731522938066 2024-11-13T18:35:38,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,249 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T18:35:38,250 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=39e84130bbc9,43285,1731522938066 2024-11-13T18:35:38,251 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T18:35:38,253 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T18:35:38,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T18:35:38,253 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T18:35:38,253 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 39e84130bbc9,43285,1731522938066 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T18:35:38,254 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:35:38,254 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:35:38,254 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:35:38,254 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/39e84130bbc9:0, corePoolSize=5, maxPoolSize=5 2024-11-13T18:35:38,254 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/39e84130bbc9:0, corePoolSize=10, maxPoolSize=10 2024-11-13T18:35:38,254 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,255 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:35:38,255 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/39e84130bbc9:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T18:35:38,256 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731522968256 2024-11-13T18:35:38,256 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T18:35:38,256 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T18:35:38,256 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:35:38,256 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T18:35:38,256 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T18:35:38,256 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T18:35:38,256 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T18:35:38,256 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T18:35:38,257 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-13T18:35:38,257 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T18:35:38,257 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T18:35:38,257 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T18:35:38,257 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,258 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T18:35:38,258 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T18:35:38,258 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T18:35:38,258 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522938258,5,FailOnTimeoutGroup] 2024-11-13T18:35:38,258 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522938258,5,FailOnTimeoutGroup] 2024-11-13T18:35:38,258 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,258 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T18:35:38,258 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,258 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:35:38,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741831_1007 (size=1321) 2024-11-13T18:35:38,270 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T18:35:38,270 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5 2024-11-13T18:35:38,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:35:38,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741832_1008 (size=32) 2024-11-13T18:35:38,285 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:35:38,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:35:38,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:35:38,287 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:35:38,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:35:38,289 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:35:38,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:35:38,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:35:38,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:35:38,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:35:38,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:35:38,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:35:38,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:35:38,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:35:38,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740 2024-11-13T18:35:38,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740 2024-11-13T18:35:38,294 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:35:38,294 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:35:38,295 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T18:35:38,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:35:38,298 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T18:35:38,298 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705680, jitterRate=-0.10268239676952362}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:35:38,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731522938285Initializing all the Stores at 1731522938286 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522938286Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522938286Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522938286Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522938286Cleaning up temporary data from old regions at 1731522938294 (+8 ms)Region opened successfully at 1731522938299 (+5 ms) 2024-11-13T18:35:38,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:35:38,300 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:35:38,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:35:38,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:35:38,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:35:38,301 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:35:38,301 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522938299Disabling compacts and flushes for region at 
1731522938299Disabling writes for close at 1731522938300 (+1 ms)Writing region close event to WAL at 1731522938301 (+1 ms)Closed at 1731522938301 2024-11-13T18:35:38,303 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:35:38,303 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T18:35:38,303 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T18:35:38,305 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:35:38,306 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T18:35:38,325 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(746): ClusterId : 93271203-d977-4cb9-ad92-4b191ffa57a7 2024-11-13T18:35:38,325 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T18:35:38,327 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T18:35:38,327 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T18:35:38,330 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T18:35:38,330 DEBUG [RS:0;39e84130bbc9:33467 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d9083c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=39e84130bbc9/172.17.0.3:0 2024-11-13T18:35:38,348 DEBUG [RS:0;39e84130bbc9:33467 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;39e84130bbc9:33467 2024-11-13T18:35:38,348 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T18:35:38,348 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T18:35:38,348 DEBUG [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T18:35:38,349 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(2659): reportForDuty to master=39e84130bbc9,43285,1731522938066 with port=33467, startcode=1731522938109 2024-11-13T18:35:38,349 DEBUG [RS:0;39e84130bbc9:33467 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T18:35:38,351 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41451, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T18:35:38,352 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43285 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 39e84130bbc9,33467,1731522938109 2024-11-13T18:35:38,352 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43285 {}] master.ServerManager(517): Registering regionserver=39e84130bbc9,33467,1731522938109 2024-11-13T18:35:38,353 DEBUG [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5 2024-11-13T18:35:38,353 DEBUG [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44461 2024-11-13T18:35:38,353 DEBUG [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T18:35:38,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:35:38,355 DEBUG [RS:0;39e84130bbc9:33467 {}] zookeeper.ZKUtil(111): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/39e84130bbc9,33467,1731522938109 2024-11-13T18:35:38,355 WARN [RS:0;39e84130bbc9:33467 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T18:35:38,355 INFO [RS:0;39e84130bbc9:33467 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:35:38,355 DEBUG [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/39e84130bbc9,33467,1731522938109 2024-11-13T18:35:38,356 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [39e84130bbc9,33467,1731522938109] 2024-11-13T18:35:38,359 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T18:35:38,361 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T18:35:38,361 INFO [RS:0;39e84130bbc9:33467 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T18:35:38,361 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T18:35:38,362 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T18:35:38,362 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T18:35:38,362 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/39e84130bbc9:0, corePoolSize=2, maxPoolSize=2 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/39e84130bbc9:0, corePoolSize=1, maxPoolSize=1 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:35:38,363 DEBUG [RS:0;39e84130bbc9:33467 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/39e84130bbc9:0, corePoolSize=3, maxPoolSize=3 2024-11-13T18:35:38,367 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T18:35:38,367 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,367 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,367 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,367 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,367 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,33467,1731522938109-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:35:38,382 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T18:35:38,382 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,33467,1731522938109-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,382 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,382 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.Replication(171): 39e84130bbc9,33467,1731522938109 started 2024-11-13T18:35:38,396 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,396 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(1482): Serving as 39e84130bbc9,33467,1731522938109, RpcServer on 39e84130bbc9/172.17.0.3:33467, sessionid=0x100ed62396d0001 2024-11-13T18:35:38,397 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T18:35:38,397 DEBUG [RS:0;39e84130bbc9:33467 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 39e84130bbc9,33467,1731522938109 2024-11-13T18:35:38,397 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,33467,1731522938109' 2024-11-13T18:35:38,397 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T18:35:38,397 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T18:35:38,398 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T18:35:38,398 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T18:35:38,398 DEBUG [RS:0;39e84130bbc9:33467 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 39e84130bbc9,33467,1731522938109 2024-11-13T18:35:38,398 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '39e84130bbc9,33467,1731522938109' 2024-11-13T18:35:38,398 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T18:35:38,398 DEBUG 
[RS:0;39e84130bbc9:33467 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T18:35:38,398 DEBUG [RS:0;39e84130bbc9:33467 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T18:35:38,398 INFO [RS:0;39e84130bbc9:33467 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T18:35:38,398 INFO [RS:0;39e84130bbc9:33467 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T18:35:38,456 WARN [39e84130bbc9:43285 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T18:35:38,500 INFO [RS:0;39e84130bbc9:33467 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C33467%2C1731522938109, suffix=, logDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/39e84130bbc9,33467,1731522938109, archiveDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/oldWALs, maxLogs=32 2024-11-13T18:35:38,501 INFO [RS:0;39e84130bbc9:33467 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C33467%2C1731522938109.1731522938500 2024-11-13T18:35:38,506 INFO [RS:0;39e84130bbc9:33467 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/39e84130bbc9,33467,1731522938109/39e84130bbc9%2C33467%2C1731522938109.1731522938500 2024-11-13T18:35:38,507 DEBUG [RS:0;39e84130bbc9:33467 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34897:34897),(127.0.0.1/127.0.0.1:42389:42389)] 2024-11-13T18:35:38,706 DEBUG [39e84130bbc9:43285 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T18:35:38,707 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=39e84130bbc9,33467,1731522938109 2024-11-13T18:35:38,708 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,33467,1731522938109, state=OPENING 2024-11-13T18:35:38,710 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T18:35:38,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:38,712 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:35:38,712 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:35:38,712 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T18:35:38,712 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,33467,1731522938109}] 2024-11-13T18:35:38,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,40543,1731522730238/39e84130bbc9%2C40543%2C1731522730238.meta.1731522731650.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:38,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34359/user/jenkins/test-data/e0d1b516-fc1f-c99c-510b-6b8fe86a179e/WALs/39e84130bbc9,38555,1731522731797/39e84130bbc9%2C38555%2C1731522731797.1731522731994 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T18:35:38,865 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T18:35:38,867 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53993, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T18:35:38,871 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T18:35:38,871 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:35:38,872 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=39e84130bbc9%2C33467%2C1731522938109.meta, suffix=.meta, logDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/39e84130bbc9,33467,1731522938109, archiveDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/oldWALs, maxLogs=32 2024-11-13T18:35:38,873 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 39e84130bbc9%2C33467%2C1731522938109.meta.1731522938873.meta 2024-11-13T18:35:38,882 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/39e84130bbc9,33467,1731522938109/39e84130bbc9%2C33467%2C1731522938109.meta.1731522938873.meta 2024-11-13T18:35:38,884 DEBUG 
[RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42389:42389),(127.0.0.1/127.0.0.1:34897:34897)] 2024-11-13T18:35:38,886 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T18:35:38,886 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T18:35:38,886 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T18:35:38,886 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-13T18:35:38,886 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T18:35:38,886 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T18:35:38,886 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T18:35:38,886 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T18:35:38,888 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T18:35:38,889 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T18:35:38,889 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:35:38,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T18:35:38,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T18:35:38,891 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:35:38,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T18:35:38,892 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T18:35:38,892 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:35:38,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T18:35:38,893 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T18:35:38,893 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T18:35:38,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T18:35:38,894 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T18:35:38,894 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740 2024-11-13T18:35:38,895 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740 2024-11-13T18:35:38,896 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T18:35:38,896 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T18:35:38,897 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-13T18:35:38,898 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T18:35:38,899 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734178, jitterRate=-0.06644488871097565}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T18:35:38,899 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T18:35:38,900 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731522938886Writing region info on filesystem at 1731522938886Initializing all the Stores at 1731522938887 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522938887Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522938888 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731522938888Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731522938888Cleaning up temporary data from old regions at 1731522938896 (+8 ms)Running coprocessor post-open hooks at 1731522938899 (+3 ms)Region opened successfully at 1731522938899 2024-11-13T18:35:38,900 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731522938865 2024-11-13T18:35:38,903 DEBUG [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T18:35:38,903 INFO [RS_OPEN_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T18:35:38,903 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=39e84130bbc9,33467,1731522938109 2024-11-13T18:35:38,904 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 39e84130bbc9,33467,1731522938109, state=OPEN 2024-11-13T18:35:38,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:35:38,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T18:35:38,909 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=39e84130bbc9,33467,1731522938109 2024-11-13T18:35:38,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:35:38,909 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T18:35:38,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T18:35:38,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=39e84130bbc9,33467,1731522938109 in 197 msec 2024-11-13T18:35:38,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T18:35:38,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 608 msec 2024-11-13T18:35:38,914 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T18:35:38,914 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T18:35:38,915 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:35:38,915 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,33467,1731522938109, seqNum=-1] 2024-11-13T18:35:38,916 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:35:38,917 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36465, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:35:38,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 668 msec 2024-11-13T18:35:38,921 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731522938921, completionTime=-1 2024-11-13T18:35:38,921 INFO 
[master/39e84130bbc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T18:35:38,921 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T18:35:38,923 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T18:35:38,923 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731522998923 2024-11-13T18:35:38,923 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731523058923 2024-11-13T18:35:38,923 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-13T18:35:38,924 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43285,1731522938066-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,924 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43285,1731522938066-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,924 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43285,1731522938066-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,924 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-39e84130bbc9:43285, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,924 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,924 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:38,926 DEBUG [master/39e84130bbc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T18:35:38,928 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.788sec 2024-11-13T18:35:38,928 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T18:35:38,928 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T18:35:38,928 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T18:35:38,928 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-13T18:35:38,928 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T18:35:38,928 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43285,1731522938066-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T18:35:38,928 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43285,1731522938066-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T18:35:38,930 DEBUG [master/39e84130bbc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T18:35:38,930 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T18:35:38,930 INFO [master/39e84130bbc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=39e84130bbc9,43285,1731522938066-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T18:35:39,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10391316, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:35:39,025 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 39e84130bbc9,43285,-1 for getting cluster id 2024-11-13T18:35:39,025 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T18:35:39,027 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '93271203-d977-4cb9-ad92-4b191ffa57a7' 2024-11-13T18:35:39,027 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T18:35:39,027 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "93271203-d977-4cb9-ad92-4b191ffa57a7" 2024-11-13T18:35:39,027 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63bb83a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:35:39,027 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [39e84130bbc9,43285,-1] 2024-11-13T18:35:39,028 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T18:35:39,028 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:39,029 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55714, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T18:35:39,030 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14628127, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T18:35:39,030 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T18:35:39,031 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=39e84130bbc9,33467,1731522938109, seqNum=-1] 2024-11-13T18:35:39,031 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T18:35:39,032 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43666, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T18:35:39,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=39e84130bbc9,43285,1731522938066 2024-11-13T18:35:39,035 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T18:35:39,038 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T18:35:39,038 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T18:35:39,040 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/test.com,8080,1, archiveDir=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/oldWALs, maxLogs=32 2024-11-13T18:35:39,041 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731522939041 2024-11-13T18:35:39,049 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/test.com,8080,1/test.com%2C8080%2C1.1731522939041 2024-11-13T18:35:39,050 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34897:34897),(127.0.0.1/127.0.0.1:42389:42389)] 2024-11-13T18:35:39,051 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731522939051 2024-11-13T18:35:39,055 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,055 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,055 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,055 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,056 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,056 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/test.com,8080,1/test.com%2C8080%2C1.1731522939041 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/test.com,8080,1/test.com%2C8080%2C1.1731522939051 2024-11-13T18:35:39,057 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34897:34897),(127.0.0.1/127.0.0.1:42389:42389)] 2024-11-13T18:35:39,057 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/test.com,8080,1/test.com%2C8080%2C1.1731522939041 is not closed yet, will try archiving it next time 2024-11-13T18:35:39,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741835_1011 (size=93) 2024-11-13T18:35:39,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741835_1011 (size=93) 2024-11-13T18:35:39,058 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,058 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,058 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,058 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,058 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,059 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/WALs/test.com,8080,1/test.com%2C8080%2C1.1731522939041 to hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/oldWALs/test.com%2C8080%2C1.1731522939041 2024-11-13T18:35:39,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741836_1012 (size=93) 2024-11-13T18:35:39,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741836_1012 (size=93) 2024-11-13T18:35:39,062 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/oldWALs 2024-11-13T18:35:39,062 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731522939051) 2024-11-13T18:35:39,063 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T18:35:39,063 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T18:35:39,063 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:35:39,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:39,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:39,063 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T18:35:39,063 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T18:35:39,063 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1762624899, stopped=false 2024-11-13T18:35:39,063 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=39e84130bbc9,43285,1731522938066 2024-11-13T18:35:39,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:35:39,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:39,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T18:35:39,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:39,065 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:35:39,065 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T18:35:39,065 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:35:39,065 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:39,066 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '39e84130bbc9,33467,1731522938109' ***** 2024-11-13T18:35:39,066 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T18:35:39,066 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:35:39,066 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T18:35:39,066 INFO [RS:0;39e84130bbc9:33467 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T18:35:39,066 INFO [RS:0;39e84130bbc9:33467 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T18:35:39,066 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T18:35:39,066 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(959): stopping server 39e84130bbc9,33467,1731522938109 2024-11-13T18:35:39,066 INFO [RS:0;39e84130bbc9:33467 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:35:39,066 INFO [RS:0;39e84130bbc9:33467 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;39e84130bbc9:33467. 
2024-11-13T18:35:39,066 DEBUG [RS:0;39e84130bbc9:33467 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T18:35:39,066 DEBUG [RS:0;39e84130bbc9:33467 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:39,066 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T18:35:39,067 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T18:35:39,067 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T18:35:39,067 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T18:35:39,067 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T18:35:39,071 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T18:35:39,071 DEBUG [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-13T18:35:39,071 DEBUG [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T18:35:39,071 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T18:35:39,071 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T18:35:39,071 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T18:35:39,071 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T18:35:39,071 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T18:35:39,071 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-13T18:35:39,087 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740/.tmp/ns/fc3ab880fe2e4bb4b3df2ffca7f09843 is 43, key is default/ns:d/1731522938917/Put/seqid=0 2024-11-13T18:35:39,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741837_1013 (size=5153) 2024-11-13T18:35:39,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741837_1013 (size=5153) 2024-11-13T18:35:39,097 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740/.tmp/ns/fc3ab880fe2e4bb4b3df2ffca7f09843 2024-11-13T18:35:39,103 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740/.tmp/ns/fc3ab880fe2e4bb4b3df2ffca7f09843 as hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740/ns/fc3ab880fe2e4bb4b3df2ffca7f09843 2024-11-13T18:35:39,108 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740/ns/fc3ab880fe2e4bb4b3df2ffca7f09843, entries=2, sequenceid=6, filesize=5.0 K 2024-11-13T18:35:39,109 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false 2024-11-13T18:35:39,113 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T18:35:39,113 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T18:35:39,114 INFO [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T18:35:39,114 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731522939071Running coprocessor pre-close hooks at 1731522939071Disabling compacts and flushes for region at 1731522939071Disabling writes for close at 1731522939071Obtaining lock to block concurrent updates at 1731522939071Preparing flush snapshotting stores in 1588230740 at 1731522939071Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731522939071Flushing stores of hbase:meta,,1.1588230740 at 1731522939072 (+1 ms)Flushing 1588230740/ns: creating writer at 1731522939072Flushing 1588230740/ns: appending metadata at 1731522939086 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731522939086Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31f54dfd: reopening flushed file at 1731522939102 (+16 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false at 1731522939109 (+7 ms)Writing region close event to WAL at 1731522939109Running coprocessor post-close hooks at 1731522939113 (+4 ms)Closed at 1731522939114 (+1 ms) 2024-11-13T18:35:39,114 DEBUG [RS_CLOSE_META-regionserver/39e84130bbc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T18:35:39,271 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(976): stopping server 39e84130bbc9,33467,1731522938109; all regions closed. 
2024-11-13T18:35:39,271 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,272 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,272 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,272 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,272 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741834_1010 (size=1152) 2024-11-13T18:35:39,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741834_1010 (size=1152) 2024-11-13T18:35:39,276 DEBUG [RS:0;39e84130bbc9:33467 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/oldWALs 2024-11-13T18:35:39,276 INFO [RS:0;39e84130bbc9:33467 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C33467%2C1731522938109.meta:.meta(num 1731522938873) 2024-11-13T18:35:39,277 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,277 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,277 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,277 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,277 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741833_1009 (size=93) 2024-11-13T18:35:39,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741833_1009 (size=93) 2024-11-13T18:35:39,282 DEBUG [RS:0;39e84130bbc9:33467 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/oldWALs 2024-11-13T18:35:39,282 INFO [RS:0;39e84130bbc9:33467 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 39e84130bbc9%2C33467%2C1731522938109:(num 1731522938500) 2024-11-13T18:35:39,282 DEBUG [RS:0;39e84130bbc9:33467 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T18:35:39,282 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T18:35:39,282 INFO [RS:0;39e84130bbc9:33467 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:35:39,282 INFO [RS:0;39e84130bbc9:33467 {}] hbase.ChoreService(370): Chore service for: regionserver/39e84130bbc9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T18:35:39,283 INFO [regionserver/39e84130bbc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T18:35:39,283 INFO [RS:0;39e84130bbc9:33467 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:35:39,283 INFO [RS:0;39e84130bbc9:33467 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33467 2024-11-13T18:35:39,285 INFO [RS:0;39e84130bbc9:33467 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:35:39,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T18:35:39,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/39e84130bbc9,33467,1731522938109 2024-11-13T18:35:39,287 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [39e84130bbc9,33467,1731522938109] 2024-11-13T18:35:39,288 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/39e84130bbc9,33467,1731522938109 already deleted, retry=false 2024-11-13T18:35:39,288 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 39e84130bbc9,33467,1731522938109 expired; onlineServers=0 2024-11-13T18:35:39,288 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '39e84130bbc9,43285,1731522938066' ***** 2024-11-13T18:35:39,288 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T18:35:39,288 INFO [M:0;39e84130bbc9:43285 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T18:35:39,288 INFO [M:0;39e84130bbc9:43285 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T18:35:39,288 DEBUG [M:0;39e84130bbc9:43285 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T18:35:39,288 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T18:35:39,288 DEBUG [M:0;39e84130bbc9:43285 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T18:35:39,288 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522938258 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.large.0-1731522938258,5,FailOnTimeoutGroup] 2024-11-13T18:35:39,289 INFO [M:0;39e84130bbc9:43285 {}] hbase.ChoreService(370): Chore service for: master/39e84130bbc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T18:35:39,289 INFO [M:0;39e84130bbc9:43285 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T18:35:39,289 DEBUG [master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522938258 {}] cleaner.HFileCleaner(306): Exit Thread[master/39e84130bbc9:0:becomeActiveMaster-HFileCleaner.small.0-1731522938258,5,FailOnTimeoutGroup] 2024-11-13T18:35:39,289 DEBUG [M:0;39e84130bbc9:43285 {}] master.HMaster(1795): Stopping service threads 2024-11-13T18:35:39,289 INFO [M:0;39e84130bbc9:43285 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T18:35:39,289 INFO [M:0;39e84130bbc9:43285 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T18:35:39,289 INFO [M:0;39e84130bbc9:43285 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T18:35:39,289 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T18:35:39,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T18:35:39,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T18:35:39,290 DEBUG [M:0;39e84130bbc9:43285 {}] zookeeper.ZKUtil(347): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T18:35:39,290 WARN [M:0;39e84130bbc9:43285 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T18:35:39,291 INFO [M:0;39e84130bbc9:43285 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/.lastflushedseqids 2024-11-13T18:35:39,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741838_1014 (size=99) 2024-11-13T18:35:39,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741838_1014 (size=99) 2024-11-13T18:35:39,297 INFO [M:0;39e84130bbc9:43285 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T18:35:39,297 INFO [M:0;39e84130bbc9:43285 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T18:35:39,297 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T18:35:39,297 INFO [M:0;39e84130bbc9:43285 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:39,297 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:39,297 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T18:35:39,297 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T18:35:39,297 INFO [M:0;39e84130bbc9:43285 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-13T18:35:39,320 DEBUG [M:0;39e84130bbc9:43285 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/223a305114334e05abf54f948d7cd8cf is 82, key is hbase:meta,,1/info:regioninfo/1731522938903/Put/seqid=0 2024-11-13T18:35:39,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741839_1015 (size=5672) 2024-11-13T18:35:39,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741839_1015 (size=5672) 2024-11-13T18:35:39,325 INFO [M:0;39e84130bbc9:43285 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/223a305114334e05abf54f948d7cd8cf 2024-11-13T18:35:39,350 DEBUG [M:0;39e84130bbc9:43285 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/544c50d1073942748df30a3241092b0b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731522938920/Put/seqid=0 2024-11-13T18:35:39,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741840_1016 (size=5275) 2024-11-13T18:35:39,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741840_1016 (size=5275) 2024-11-13T18:35:39,355 INFO [M:0;39e84130bbc9:43285 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/544c50d1073942748df30a3241092b0b 2024-11-13T18:35:39,374 DEBUG [M:0;39e84130bbc9:43285 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/760a282fcb7a43829879767fbb644441 is 69, key is 39e84130bbc9,33467,1731522938109/rs:state/1731522938352/Put/seqid=0 2024-11-13T18:35:39,380 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741841_1017 (size=5156) 2024-11-13T18:35:39,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741841_1017 (size=5156) 2024-11-13T18:35:39,381 INFO [M:0;39e84130bbc9:43285 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/760a282fcb7a43829879767fbb644441 2024-11-13T18:35:39,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:35:39,387 INFO [RS:0;39e84130bbc9:33467 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:35:39,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33467-0x100ed62396d0001, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:35:39,387 INFO [RS:0;39e84130bbc9:33467 {}] regionserver.HRegionServer(1031): Exiting; stopping=39e84130bbc9,33467,1731522938109; zookeeper connection closed. 2024-11-13T18:35:39,387 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@48a388ab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@48a388ab 2024-11-13T18:35:39,388 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T18:35:39,400 DEBUG [M:0;39e84130bbc9:43285 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a668eec289124d708b3ae6cce70a52cd is 52, key is load_balancer_on/state:d/1731522939037/Put/seqid=0 2024-11-13T18:35:39,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741842_1018 (size=5056) 2024-11-13T18:35:39,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741842_1018 (size=5056) 2024-11-13T18:35:39,405 INFO [M:0;39e84130bbc9:43285 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a668eec289124d708b3ae6cce70a52cd 2024-11-13T18:35:39,411 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/223a305114334e05abf54f948d7cd8cf as hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/223a305114334e05abf54f948d7cd8cf 2024-11-13T18:35:39,416 INFO [M:0;39e84130bbc9:43285 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/223a305114334e05abf54f948d7cd8cf, entries=8, sequenceid=29, filesize=5.5 K 2024-11-13T18:35:39,417 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/544c50d1073942748df30a3241092b0b as hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/544c50d1073942748df30a3241092b0b 2024-11-13T18:35:39,422 INFO [M:0;39e84130bbc9:43285 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/544c50d1073942748df30a3241092b0b, entries=3, sequenceid=29, filesize=5.2 K 2024-11-13T18:35:39,423 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/760a282fcb7a43829879767fbb644441 as hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/760a282fcb7a43829879767fbb644441 2024-11-13T18:35:39,428 INFO [M:0;39e84130bbc9:43285 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/760a282fcb7a43829879767fbb644441, entries=1, sequenceid=29, filesize=5.0 K 2024-11-13T18:35:39,429 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a668eec289124d708b3ae6cce70a52cd as hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a668eec289124d708b3ae6cce70a52cd 2024-11-13T18:35:39,433 INFO [M:0;39e84130bbc9:43285 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44461/user/jenkins/test-data/8d8b1678-4a91-faf9-bc2f-e4db8326bec5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a668eec289124d708b3ae6cce70a52cd, entries=1, sequenceid=29, filesize=4.9 K 2024-11-13T18:35:39,434 INFO [M:0;39e84130bbc9:43285 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=29, compaction requested=false 2024-11-13T18:35:39,435 INFO [M:0;39e84130bbc9:43285 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T18:35:39,436 DEBUG [M:0;39e84130bbc9:43285 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731522939297Disabling compacts and flushes for region at 1731522939297Disabling writes for close at 1731522939297Obtaining lock to block concurrent updates at 1731522939297Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731522939297Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731522939298 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731522939298Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731522939298Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731522939319 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731522939319Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731522939331 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731522939349 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731522939349Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731522939359 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731522939374 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731522939374Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731522939385 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731522939400 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731522939400Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@216da41b: reopening flushed file at 1731522939410 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65b73a78: reopening flushed file at 1731522939416 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c48ecb1: reopening flushed file at 1731522939422 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3aeb35d9: reopening flushed file at 1731522939428 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=29, compaction requested=false at 1731522939434 (+6 ms)Writing region close event to WAL at 1731522939435 (+1 ms)Closed at 1731522939435 2024-11-13T18:35:39,436 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,436 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,436 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,436 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,436 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T18:35:39,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34951 is added to blk_1073741830_1006 (size=10311) 2024-11-13T18:35:39,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39213 is added to blk_1073741830_1006 (size=10311) 2024-11-13T18:35:39,439 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T18:35:39,439 INFO [M:0;39e84130bbc9:43285 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T18:35:39,439 INFO [M:0;39e84130bbc9:43285 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43285 2024-11-13T18:35:39,439 INFO [M:0;39e84130bbc9:43285 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T18:35:39,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:35:39,543 INFO [M:0;39e84130bbc9:43285 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T18:35:39,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43285-0x100ed62396d0000, quorum=127.0.0.1:58922, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T18:35:39,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@56063e0c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:35:39,546 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61edd007{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:35:39,546 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:35:39,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b13a29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:35:39,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f9a8217{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/hadoop.log.dir/,STOPPED} 2024-11-13T18:35:39,548 WARN [BP-1158409345-172.17.0.3-1731522937102 heartbeating to localhost/127.0.0.1:44461 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:35:39,548 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:35:39,548 WARN [BP-1158409345-172.17.0.3-1731522937102 heartbeating to localhost/127.0.0.1:44461 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1158409345-172.17.0.3-1731522937102 (Datanode Uuid ba1858f7-7d62-4c64-a2b2-02b7210ce630) service to localhost/127.0.0.1:44461 2024-11-13T18:35:39,548 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:35:39,549 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/data/data3/current/BP-1158409345-172.17.0.3-1731522937102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:35:39,549 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/data/data4/current/BP-1158409345-172.17.0.3-1731522937102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:35:39,549 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:35:39,554 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3910812a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T18:35:39,554 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31d39f87{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:35:39,554 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:35:39,555 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18478920{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:35:39,555 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@333ec6ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/hadoop.log.dir/,STOPPED} 2024-11-13T18:35:39,556 WARN [BP-1158409345-172.17.0.3-1731522937102 heartbeating to localhost/127.0.0.1:44461 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T18:35:39,556 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T18:35:39,556 WARN [BP-1158409345-172.17.0.3-1731522937102 heartbeating to localhost/127.0.0.1:44461 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1158409345-172.17.0.3-1731522937102 (Datanode Uuid 39555c80-8c45-4429-a463-11600cae136a) service to localhost/127.0.0.1:44461 2024-11-13T18:35:39,556 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T18:35:39,557 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/data/data1/current/BP-1158409345-172.17.0.3-1731522937102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:35:39,557 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/cluster_30c55432-2486-d92e-ef7c-46b3875f9bcb/data/data2/current/BP-1158409345-172.17.0.3-1731522937102 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T18:35:39,557 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T18:35:39,564 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1281d817{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T18:35:39,565 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ac5026a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T18:35:39,565 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T18:35:39,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3073e97e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T18:35:39,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b4c94a0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d9ab4104-e784-9757-087d-f56c137ad446/hadoop.log.dir/,STOPPED} 2024-11-13T18:35:39,575 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T18:35:39,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T18:35:39,612 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 226) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44461 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44461 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44461 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44461 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: globalEventExecutor-1-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44461 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:44461 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/39e84130bbc9:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44461 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'NameNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44461 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=383 (was 382) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1022 (was 1073)