2024-11-18 20:26:41,967 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-18 20:26:41,981 main DEBUG Took 0.011011 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-18 20:26:41,981 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-18 20:26:41,982 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-18 20:26:41,983 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-18 20:26:41,984 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:41,991 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-18 20:26:42,003 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,004 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,005 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,005 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,006 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,007 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,008 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,008 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,009 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,009 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,010 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,011 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,011 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,012 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-18 20:26:42,012 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,013 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,013 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,014 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,014 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,015 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,015 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,016 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,016 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,017 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 20:26:42,017 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,018 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-18 20:26:42,020 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 20:26:42,021 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-18 20:26:42,024 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-18 20:26:42,024 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-18 20:26:42,026 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-18 20:26:42,026 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-18 20:26:42,037 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-18 20:26:42,040 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-18 20:26:42,042 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-18 20:26:42,043 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-18 20:26:42,043 main DEBUG createAppenders(={Console}) 2024-11-18 20:26:42,044 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-18 20:26:42,045 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-18 20:26:42,045 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-18 20:26:42,046 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-18 20:26:42,046 main DEBUG OutputStream closed 2024-11-18 20:26:42,046 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-18 20:26:42,047 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-18 20:26:42,047 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-18 20:26:42,139 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-18 20:26:42,141 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-18 20:26:42,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-18 20:26:42,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-18 20:26:42,144 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-18 20:26:42,145 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-18 20:26:42,145 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-18 20:26:42,146 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-18 20:26:42,146 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-18 20:26:42,146 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-18 20:26:42,147 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-18 20:26:42,147 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-18 20:26:42,148 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-18 20:26:42,148 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-18 20:26:42,149 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-18 20:26:42,149 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-18 20:26:42,149 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-18 20:26:42,150 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-18 20:26:42,152 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-18 20:26:42,152 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-18 20:26:42,152 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-18 20:26:42,153 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-18T20:26:42,380 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc 2024-11-18 20:26:42,382 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-18 20:26:42,383 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-18T20:26:42,391 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-18T20:26:42,423 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=281, ProcessCount=11, AvailableMemoryMB=3680 2024-11-18T20:26:42,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:26:42,445 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56, deleteOnExit=true 2024-11-18T20:26:42,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:26:42,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/test.cache.data in system properties and HBase conf 2024-11-18T20:26:42,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:26:42,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:26:42,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:26:42,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:26:42,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:26:42,540 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-18T20:26:42,628 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T20:26:42,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:26:42,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:26:42,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:26:42,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:26:42,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:26:42,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:26:42,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:26:42,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:26:42,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:26:42,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:26:42,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:26:42,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:26:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:26:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:26:43,080 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:26:43,391 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-18T20:26:43,461 INFO [Time-limited test {}] log.Log(170): Logging initialized @2155ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-18T20:26:43,526 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:26:43,582 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:26:43,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:26:43,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:26:43,605 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:26:43,620 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:26:43,623 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:26:43,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:26:43,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/java.io.tmpdir/jetty-localhost-46803-hadoop-hdfs-3_4_1-tests_jar-_-any-12905015656601448508/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:26:43,819 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:46803} 2024-11-18T20:26:43,819 INFO [Time-limited test {}] server.Server(415): Started @2514ms 2024-11-18T20:26:43,844 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:26:44,145 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:26:44,153 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:26:44,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:26:44,155 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:26:44,155 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:26:44,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:26:44,158 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:26:44,258 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59e63bea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/java.io.tmpdir/jetty-localhost-40831-hadoop-hdfs-3_4_1-tests_jar-_-any-4576105099837884997/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:26:44,259 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:40831} 2024-11-18T20:26:44,259 INFO [Time-limited test {}] server.Server(415): Started @2955ms 2024-11-18T20:26:44,309 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:26:44,413 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:26:44,421 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:26:44,425 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:26:44,426 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:26:44,426 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:26:44,427 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:26:44,428 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:26:44,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55d18735{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/java.io.tmpdir/jetty-localhost-40887-hadoop-hdfs-3_4_1-tests_jar-_-any-5835388933561122269/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:26:44,570 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:40887} 2024-11-18T20:26:44,570 INFO [Time-limited test {}] server.Server(415): Started @3265ms 2024-11-18T20:26:44,573 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-18T20:26:44,676 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/data/data3/current/BP-2064152689-172.17.0.2-1731961603167/current, will proceed with Du for space computation calculation, 2024-11-18T20:26:44,676 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/data/data1/current/BP-2064152689-172.17.0.2-1731961603167/current, will proceed with Du for space computation calculation, 2024-11-18T20:26:44,676 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/data/data2/current/BP-2064152689-172.17.0.2-1731961603167/current, will proceed with Du for space computation calculation, 2024-11-18T20:26:44,681 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/data/data4/current/BP-2064152689-172.17.0.2-1731961603167/current, will proceed with Du for space computation calculation, 2024-11-18T20:26:44,732 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:26:44,732 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:26:44,812 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3106157d5baf4018 with lease ID 0xa9f30de006cf9abc: Processing first storage report for DS-665db41d-f112-4c3d-be98-9768899f1003 from datanode DatanodeRegistration(127.0.0.1:44265, datanodeUuid=9b0ee407-939a-467a-982b-e94bef1f3c54, infoPort=42335, infoSecurePort=0, ipcPort=36275, storageInfo=lv=-57;cid=testClusterID;nsid=151388022;c=1731961603167) 2024-11-18T20:26:44,813 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3106157d5baf4018 with lease ID 0xa9f30de006cf9abc: from storage DS-665db41d-f112-4c3d-be98-9768899f1003 node DatanodeRegistration(127.0.0.1:44265, datanodeUuid=9b0ee407-939a-467a-982b-e94bef1f3c54, infoPort=42335, infoSecurePort=0, ipcPort=36275, storageInfo=lv=-57;cid=testClusterID;nsid=151388022;c=1731961603167), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-18T20:26:44,814 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb83801deb493b4e4 with lease ID 0xa9f30de006cf9abd: Processing first storage report for DS-bb4f9828-3952-47d3-8c37-65ee8781c763 from datanode DatanodeRegistration(127.0.0.1:42115, datanodeUuid=db69b180-a8ba-441d-816b-fe4377c894e2, infoPort=36715, infoSecurePort=0, ipcPort=35749, storageInfo=lv=-57;cid=testClusterID;nsid=151388022;c=1731961603167) 2024-11-18T20:26:44,814 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb83801deb493b4e4 with lease ID 0xa9f30de006cf9abd: from storage DS-bb4f9828-3952-47d3-8c37-65ee8781c763 node DatanodeRegistration(127.0.0.1:42115, datanodeUuid=db69b180-a8ba-441d-816b-fe4377c894e2, infoPort=36715, infoSecurePort=0, ipcPort=35749, storageInfo=lv=-57;cid=testClusterID;nsid=151388022;c=1731961603167), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:26:44,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3106157d5baf4018 with lease ID 0xa9f30de006cf9abc: Processing first storage report for DS-4d7108d7-a2a7-484f-a554-9bd16b405217 from datanode DatanodeRegistration(127.0.0.1:44265, datanodeUuid=9b0ee407-939a-467a-982b-e94bef1f3c54, infoPort=42335, infoSecurePort=0, ipcPort=36275, storageInfo=lv=-57;cid=testClusterID;nsid=151388022;c=1731961603167) 2024-11-18T20:26:44,815 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3106157d5baf4018 with lease ID 0xa9f30de006cf9abc: from storage DS-4d7108d7-a2a7-484f-a554-9bd16b405217 node DatanodeRegistration(127.0.0.1:44265, datanodeUuid=9b0ee407-939a-467a-982b-e94bef1f3c54, infoPort=42335, infoSecurePort=0, ipcPort=36275, storageInfo=lv=-57;cid=testClusterID;nsid=151388022;c=1731961603167), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T20:26:44,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb83801deb493b4e4 with lease ID 0xa9f30de006cf9abd: Processing first storage report for DS-3e1e824d-12c2-4cee-9e13-4f70ccce16cd from datanode DatanodeRegistration(127.0.0.1:42115, datanodeUuid=db69b180-a8ba-441d-816b-fe4377c894e2, infoPort=36715, infoSecurePort=0, ipcPort=35749, storageInfo=lv=-57;cid=testClusterID;nsid=151388022;c=1731961603167) 2024-11-18T20:26:44,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xb83801deb493b4e4 with lease ID 0xa9f30de006cf9abd: from storage DS-3e1e824d-12c2-4cee-9e13-4f70ccce16cd node DatanodeRegistration(127.0.0.1:42115, datanodeUuid=db69b180-a8ba-441d-816b-fe4377c894e2, infoPort=36715, infoSecurePort=0, ipcPort=35749, storageInfo=lv=-57;cid=testClusterID;nsid=151388022;c=1731961603167), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:26:44,912 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc 2024-11-18T20:26:44,994 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/zookeeper_0, clientPort=65026, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:26:45,004 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=65026 2024-11-18T20:26:45,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:26:45,024 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:26:45,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:26:45,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:26:45,660 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094 with version=8 2024-11-18T20:26:45,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/hbase-staging 2024-11-18T20:26:45,735 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-18T20:26:45,930 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:26:45,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:26:45,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:26:45,943 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:26:45,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:26:45,944 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:26:46,060 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:26:46,109 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-18T20:26:46,117 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-18T20:26:46,120 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:26:46,141 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 85224 (auto-detected) 2024-11-18T20:26:46,142 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-18T20:26:46,158 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34529 2024-11-18T20:26:46,177 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34529 connecting to ZooKeeper ensemble=127.0.0.1:65026 2024-11-18T20:26:46,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:345290x0, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:26:46,207 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34529-0x100548427de0000 connected 2024-11-18T20:26:46,231 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:26:46,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:26:46,242 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:26:46,246 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094, hbase.cluster.distributed=false 2024-11-18T20:26:46,269 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:26:46,273 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34529 2024-11-18T20:26:46,274 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34529 2024-11-18T20:26:46,274 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34529 2024-11-18T20:26:46,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34529 2024-11-18T20:26:46,275 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34529 2024-11-18T20:26:46,375 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:26:46,376 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:26:46,376 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:26:46,376 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:26:46,377 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:26:46,377 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:26:46,379 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:26:46,382 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:26:46,382 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40349 2024-11-18T20:26:46,384 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40349 connecting to ZooKeeper ensemble=127.0.0.1:65026 2024-11-18T20:26:46,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:26:46,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:26:46,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:403490x0, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:26:46,397 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40349-0x100548427de0001 connected 2024-11-18T20:26:46,397 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:403490x0, 
quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:26:46,401 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:26:46,409 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:26:46,411 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:26:46,417 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:26:46,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40349 2024-11-18T20:26:46,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40349 2024-11-18T20:26:46,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40349 2024-11-18T20:26:46,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40349 2024-11-18T20:26:46,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40349 2024-11-18T20:26:46,439 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c0a89b2656d4:34529 2024-11-18T20:26:46,440 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c0a89b2656d4,34529,1731961605780 2024-11-18T20:26:46,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:26:46,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:26:46,447 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c0a89b2656d4,34529,1731961605780 2024-11-18T20:26:46,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:26:46,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:46,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-18T20:26:46,466 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:26:46,468 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c0a89b2656d4,34529,1731961605780 from backup master directory 2024-11-18T20:26:46,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c0a89b2656d4,34529,1731961605780 2024-11-18T20:26:46,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:26:46,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:26:46,471 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:26:46,471 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c0a89b2656d4,34529,1731961605780 2024-11-18T20:26:46,473 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-18T20:26:46,474 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-18T20:26:46,524 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/hbase.id] with ID: d8ae44cc-d567-41bf-b268-85337b02bff6 2024-11-18T20:26:46,524 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/.tmp/hbase.id 2024-11-18T20:26:46,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:26:46,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:26:46,537 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/.tmp/hbase.id]:[hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/hbase.id] 2024-11-18T20:26:46,582 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:26:46,587 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:26:46,606 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-18T20:26:46,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:46,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:46,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:26:46,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:26:46,641 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:26:46,642 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:26:46,647 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:26:46,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:26:46,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:26:46,693 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store 2024-11-18T20:26:46,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:26:46,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:26:46,718 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-18T20:26:46,721 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:26:46,722 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:26:46,722 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:26:46,722 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:26:46,723 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:26:46,724 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:26:46,724 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:26:46,725 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961606722Disabling compacts and flushes for region at 1731961606722Disabling writes for close at 1731961606724 (+2 ms)Writing region close event to WAL at 1731961606724Closed at 1731961606724 2024-11-18T20:26:46,727 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/.initializing 2024-11-18T20:26:46,727 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/WALs/c0a89b2656d4,34529,1731961605780 2024-11-18T20:26:46,749 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C34529%2C1731961605780, suffix=, logDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/WALs/c0a89b2656d4,34529,1731961605780, archiveDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/oldWALs, maxLogs=10 2024-11-18T20:26:46,757 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C34529%2C1731961605780.1731961606753 2024-11-18T20:26:46,776 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/WALs/c0a89b2656d4,34529,1731961605780/c0a89b2656d4%2C34529%2C1731961605780.1731961606753 2024-11-18T20:26:46,783 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36715:36715),(127.0.0.1/127.0.0.1:42335:42335)] 2024-11-18T20:26:46,784 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:26:46,785 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:26:46,787 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,788 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:26:46,852 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:46,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:26:46,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:26:46,859 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:46,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:26:46,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,864 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:26:46,864 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:46,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:26:46,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:26:46,868 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:46,869 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:26:46,870 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,874 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,875 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,880 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,880 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,883 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:26:46,887 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:26:46,890 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:26:46,892 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=748206, jitterRate=-0.048607468605041504}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:26:46,898 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961606799Initializing all the Stores at 1731961606801 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961606802 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961606802Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961606803 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961606803Cleaning up temporary data from old regions at 1731961606880 (+77 ms)Region opened successfully at 1731961606898 (+18 ms) 2024-11-18T20:26:46,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:26:46,934 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@177735ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:26:46,961 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:26:46,971 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:26:46,971 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:26:46,973 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:26:46,975 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-18T20:26:46,979 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-18T20:26:46,979 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:26:47,003 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:26:47,011 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:26:47,012 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:26:47,015 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:26:47,017 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:26:47,018 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:26:47,020 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:26:47,024 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:26:47,025 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:26:47,026 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:26:47,027 
DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:26:47,043 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:26:47,044 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:26:47,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:26:47,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:26:47,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:47,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:47,051 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c0a89b2656d4,34529,1731961605780, sessionid=0x100548427de0000, setting cluster-up flag (Was=false) 2024-11-18T20:26:47,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:47,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:47,069 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:26:47,071 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,34529,1731961605780 2024-11-18T20:26:47,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:47,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:47,079 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:26:47,081 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,34529,1731961605780 2024-11-18T20:26:47,087 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:26:47,129 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(746): ClusterId : d8ae44cc-d567-41bf-b268-85337b02bff6 2024-11-18T20:26:47,132 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:26:47,137 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:26:47,137 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:26:47,139 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:26:47,140 DEBUG [RS:0;c0a89b2656d4:40349 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a6d68d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:26:47,154 DEBUG [RS:0;c0a89b2656d4:40349 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c0a89b2656d4:40349 2024-11-18T20:26:47,157 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:26:47,157 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:26:47,157 DEBUG [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T20:26:47,157 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:26:47,160 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0a89b2656d4,34529,1731961605780 with port=40349, startcode=1731961606341 2024-11-18T20:26:47,167 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:26:47,170 DEBUG [RS:0;c0a89b2656d4:40349 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:26:47,174 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
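The FlushLargeStoresPolicy entries earlier in this startup (20:26:46,883 through the region-open journal at 20:26:46,898) illustrate the fallback rule spelled out in that message: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset in the master:store descriptor, the per-family lower bound is the region memstore flush size divided by the number of column families. With the injected flushSize=134217728 and the four families info/proc/rs/state, that is 134217728 B / 4 = 33554432 B (32 MB), which matches the FlushLargeStoresPolicy{flushSizeLowerBound=33554432} recorded when the region opened.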
2024-11-18T20:26:47,180 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c0a89b2656d4,34529,1731961605780 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:26:47,187 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:26:47,187 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:26:47,187 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:26:47,187 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:26:47,188 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c0a89b2656d4:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:26:47,188 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,188 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:26:47,188 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,192 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961637192 2024-11-18T20:26:47,193 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:26:47,194 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:26:47,194 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:26:47,195 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:26:47,198 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:26:47,199 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:26:47,199 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:26:47,199 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:26:47,201 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:47,201 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:26:47,201 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
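The hbase:meta descriptor written at 20:26:47,201 (like the master:store descriptor earlier) is produced internally by the master, but the same shape can be expressed with the public client API. A rough sketch, for illustration only and not the code path the master runs, of the 'info' family attributes and the MultiRowMutationEndpoint coprocessor string shown in the log; the table name is hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static void main(String[] args) throws Exception {
            // 'info' family as logged: VERSIONS=3, ROW_INDEX_V1 encoding,
            // ROWCOL bloom filter, in-memory, 8 KB block size.
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("meta_like_demo")) // hypothetical name
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .build())
                // Same coprocessor class that appears as coprocessor$1 in the log.
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();

            System.out.println(td);
        }
    }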
2024-11-18T20:26:47,204 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:26:47,206 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:26:47,206 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:26:47,209 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:26:47,209 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:26:47,215 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961607211,5,FailOnTimeoutGroup] 2024-11-18T20:26:47,218 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961607215,5,FailOnTimeoutGroup] 2024-11-18T20:26:47,218 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,219 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:26:47,220 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,220 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T20:26:47,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:26:47,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:26:47,226 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:26:47,226 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094 2024-11-18T20:26:47,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:26:47,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:26:47,249 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:26:47,252 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54931, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:26:47,252 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 
2024-11-18T20:26:47,256 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:26:47,256 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:47,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:26:47,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:26:47,260 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:26:47,261 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:47,261 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34529 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:47,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:26:47,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:26:47,264 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34529 {}] master.ServerManager(517): Registering regionserver=c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:47,265 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:26:47,265 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:47,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:26:47,267 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:26:47,270 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:26:47,270 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:47,271 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:26:47,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:26:47,273 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740 2024-11-18T20:26:47,274 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740 2024-11-18T20:26:47,278 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:26:47,278 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:26:47,279 DEBUG [PEWorker-1 {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:26:47,283 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:26:47,283 DEBUG [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094 2024-11-18T20:26:47,284 DEBUG [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35383 2024-11-18T20:26:47,284 DEBUG [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:26:47,287 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:26:47,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:26:47,288 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691213, jitterRate=-0.12107749283313751}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:26:47,289 DEBUG [RS:0;c0a89b2656d4:40349 {}] zookeeper.ZKUtil(111): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:47,289 WARN [RS:0;c0a89b2656d4:40349 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T20:26:47,289 INFO [RS:0;c0a89b2656d4:40349 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:26:47,290 DEBUG [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:47,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961607249Initializing all the Stores at 1731961607252 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961607252Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961607252Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961607252Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961607252Cleaning up temporary data from old regions at 1731961607278 (+26 ms)Region opened successfully at 1731961607293 (+15 ms) 2024-11-18T20:26:47,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:26:47,293 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:26:47,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:26:47,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:26:47,294 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:26:47,294 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0a89b2656d4,40349,1731961606341] 2024-11-18T20:26:47,296 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:26:47,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961607293Disabling compacts and flushes for region at 1731961607293Disabling writes for close at 1731961607294 (+1 ms)Writing region close event to WAL at 1731961607295 (+1 ms)Closed at 1731961607296 (+1 ms) 2024-11-18T20:26:47,300 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:26:47,300 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:26:47,308 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:26:47,318 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:26:47,320 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:26:47,322 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:26:47,337 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:26:47,342 INFO [RS:0;c0a89b2656d4:40349 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:26:47,342 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,343 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:26:47,348 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:26:47,349 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-18T20:26:47,349 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,350 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,350 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,350 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,350 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,350 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:26:47,350 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,351 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,351 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,351 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,351 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,351 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:26:47,351 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:26:47,351 DEBUG [RS:0;c0a89b2656d4:40349 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:26:47,353 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,353 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,353 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,354 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-18T20:26:47,354 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,354 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40349,1731961606341-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:26:47,370 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:26:47,372 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40349,1731961606341-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,373 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,373 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.Replication(171): c0a89b2656d4,40349,1731961606341 started 2024-11-18T20:26:47,390 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:47,390 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(1482): Serving as c0a89b2656d4,40349,1731961606341, RpcServer on c0a89b2656d4/172.17.0.2:40349, sessionid=0x100548427de0001 2024-11-18T20:26:47,391 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:26:47,391 DEBUG [RS:0;c0a89b2656d4:40349 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:47,391 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,40349,1731961606341' 2024-11-18T20:26:47,392 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:26:47,393 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:26:47,393 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:26:47,393 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:26:47,393 DEBUG [RS:0;c0a89b2656d4:40349 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:47,394 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,40349,1731961606341' 2024-11-18T20:26:47,394 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:26:47,394 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:26:47,395 DEBUG [RS:0;c0a89b2656d4:40349 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:26:47,395 INFO [RS:0;c0a89b2656d4:40349 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:26:47,395 INFO [RS:0;c0a89b2656d4:40349 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-18T20:26:47,471 WARN [c0a89b2656d4:34529 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:26:47,506 INFO [RS:0;c0a89b2656d4:40349 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C40349%2C1731961606341, suffix=, logDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341, archiveDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs, maxLogs=32 2024-11-18T20:26:47,511 INFO [RS:0;c0a89b2656d4:40349 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.1731961607511 2024-11-18T20:26:47,519 INFO [RS:0;c0a89b2656d4:40349 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961607511 2024-11-18T20:26:47,521 DEBUG [RS:0;c0a89b2656d4:40349 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42335:42335),(127.0.0.1/127.0.0.1:36715:36715)] 2024-11-18T20:26:47,726 DEBUG [c0a89b2656d4:34529 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:26:47,739 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:47,745 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,40349,1731961606341, state=OPENING 2024-11-18T20:26:47,749 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:26:47,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:47,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:26:47,751 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:26:47,751 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:26:47,753 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:26:47,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,40349,1731961606341}] 2024-11-18T20:26:47,933 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:26:47,936 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35193, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:26:47,946 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:26:47,947 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:26:47,951 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C40349%2C1731961606341.meta, suffix=.meta, logDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341, archiveDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs, maxLogs=32 2024-11-18T20:26:47,953 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.meta.1731961607952.meta 2024-11-18T20:26:47,961 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.meta.1731961607952.meta 2024-11-18T20:26:47,964 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42335:42335),(127.0.0.1/127.0.0.1:36715:36715)] 2024-11-18T20:26:47,965 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:26:47,967 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:26:47,969 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:26:47,974 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T20:26:47,978 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:26:47,979 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:26:47,979 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:26:47,979 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:26:47,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:26:47,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:26:47,985 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:47,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:26:47,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:26:47,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:26:47,987 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:47,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:26:47,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:26:47,990 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:26:47,990 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:47,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:26:47,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:26:47,993 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:26:47,993 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:47,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T20:26:47,994 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:26:47,996 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740 2024-11-18T20:26:47,999 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740 2024-11-18T20:26:48,001 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:26:48,001 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:26:48,003 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:26:48,006 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:26:48,008 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808291, jitterRate=0.027796372771263123}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:26:48,008 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:26:48,010 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961607980Writing region info on filesystem at 1731961607980Initializing all the Stores at 1731961607982 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961607982Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961607982Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961607982Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961607982Cleaning up temporary data from old regions at 1731961608002 (+20 ms)Running coprocessor post-open hooks at 1731961608008 (+6 ms)Region opened successfully at 1731961608009 (+1 ms) 2024-11-18T20:26:48,015 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961607923 2024-11-18T20:26:48,025 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:26:48,026 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:26:48,027 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:48,030 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,40349,1731961606341, state=OPEN 2024-11-18T20:26:48,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:26:48,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:26:48,142 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:26:48,142 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:26:48,142 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:48,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:26:48,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,40349,1731961606341 in 389 msec 2024-11-18T20:26:48,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:26:48,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 846 msec 2024-11-18T20:26:48,160 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:26:48,160 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:26:48,178 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:26:48,179 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,40349,1731961606341, seqNum=-1] 2024-11-18T20:26:48,196 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:26:48,198 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47707, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:26:48,218 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1020 sec 2024-11-18T20:26:48,218 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961608218, completionTime=-1 2024-11-18T20:26:48,223 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:26:48,223 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:26:48,249 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:26:48,249 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961668249 2024-11-18T20:26:48,249 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961728249 2024-11-18T20:26:48,249 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 26 msec 2024-11-18T20:26:48,252 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34529,1731961605780-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:48,253 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34529,1731961605780-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:48,253 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34529,1731961605780-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:48,254 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c0a89b2656d4:34529, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:26:48,255 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:48,255 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:26:48,261 DEBUG [master/c0a89b2656d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:26:48,283 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.812sec 2024-11-18T20:26:48,285 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:26:48,286 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:26:48,287 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:26:48,288 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:26:48,288 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:26:48,289 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34529,1731961605780-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:26:48,289 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34529,1731961605780-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:26:48,298 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:26:48,299 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:26:48,300 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34529,1731961605780-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:26:48,339 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11bd0bf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:26:48,341 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-18T20:26:48,341 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-18T20:26:48,345 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c0a89b2656d4,34529,-1 for getting cluster id 2024-11-18T20:26:48,347 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:26:48,356 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd8ae44cc-d567-41bf-b268-85337b02bff6' 2024-11-18T20:26:48,359 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:26:48,360 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d8ae44cc-d567-41bf-b268-85337b02bff6" 2024-11-18T20:26:48,362 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7dfff7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:26:48,362 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c0a89b2656d4,34529,-1] 2024-11-18T20:26:48,365 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:26:48,367 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:26:48,369 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56588, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:26:48,372 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695de2e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:26:48,372 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:26:48,379 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,40349,1731961606341, seqNum=-1] 2024-11-18T20:26:48,380 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:26:48,382 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47154, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:26:48,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=c0a89b2656d4,34529,1731961605780 2024-11-18T20:26:48,403 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:26:48,410 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:26:48,413 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:26:48,418 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c0a89b2656d4,34529,1731961605780 2024-11-18T20:26:48,422 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@66649c1a 2024-11-18T20:26:48,423 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:26:48,426 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56602, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:26:48,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34529 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:26:48,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34529 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T20:26:48,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34529 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:26:48,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34529 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-18T20:26:48,443 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:26:48,467 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34529 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-18T20:26:48,467 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:48,471 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:26:48,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34529 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:26:48,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741835_1011 (size=389) 2024-11-18T20:26:48,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741835_1011 (size=389) 2024-11-18T20:26:48,504 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e8ed5b9b24af366d33d7ef4869d469f3, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094 2024-11-18T20:26:48,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741836_1012 (size=72) 2024-11-18T20:26:48,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741836_1012 (size=72) 2024-11-18T20:26:48,516 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:26:48,516 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing e8ed5b9b24af366d33d7ef4869d469f3, disabling compactions & flushes 2024-11-18T20:26:48,516 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:26:48,516 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:26:48,516 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. after waiting 0 ms 2024-11-18T20:26:48,517 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:26:48,517 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:26:48,517 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for e8ed5b9b24af366d33d7ef4869d469f3: Waiting for close lock at 1731961608516Disabling compacts and flushes for region at 1731961608516Disabling writes for close at 1731961608517 (+1 ms)Writing region close event to WAL at 1731961608517Closed at 1731961608517 2024-11-18T20:26:48,519 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:26:48,523 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731961608519"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961608519"}]},"ts":"1731961608519"} 2024-11-18T20:26:48,528 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T20:26:48,530 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:26:48,533 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961608531"}]},"ts":"1731961608531"} 2024-11-18T20:26:48,538 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-18T20:26:48,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=e8ed5b9b24af366d33d7ef4869d469f3, ASSIGN}] 2024-11-18T20:26:48,542 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=e8ed5b9b24af366d33d7ef4869d469f3, ASSIGN 2024-11-18T20:26:48,544 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=e8ed5b9b24af366d33d7ef4869d469f3, ASSIGN; state=OFFLINE, location=c0a89b2656d4,40349,1731961606341; forceNewPlan=false, retain=false 2024-11-18T20:26:48,697 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e8ed5b9b24af366d33d7ef4869d469f3, regionState=OPENING, regionLocation=c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:48,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=e8ed5b9b24af366d33d7ef4869d469f3, ASSIGN because future has completed 2024-11-18T20:26:48,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8ed5b9b24af366d33d7ef4869d469f3, server=c0a89b2656d4,40349,1731961606341}] 2024-11-18T20:26:48,877 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 
2024-11-18T20:26:48,878 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e8ed5b9b24af366d33d7ef4869d469f3, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:26:48,878 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,878 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:26:48,878 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,878 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,881 INFO [StoreOpener-e8ed5b9b24af366d33d7ef4869d469f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,884 INFO [StoreOpener-e8ed5b9b24af366d33d7ef4869d469f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8ed5b9b24af366d33d7ef4869d469f3 columnFamilyName info 2024-11-18T20:26:48,884 DEBUG [StoreOpener-e8ed5b9b24af366d33d7ef4869d469f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:26:48,885 INFO [StoreOpener-e8ed5b9b24af366d33d7ef4869d469f3-1 {}] regionserver.HStore(327): Store=e8ed5b9b24af366d33d7ef4869d469f3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:26:48,886 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,887 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,888 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,889 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,889 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,892 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,896 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:26:48,897 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e8ed5b9b24af366d33d7ef4869d469f3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698417, jitterRate=-0.11191801726818085}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:26:48,897 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:26:48,899 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e8ed5b9b24af366d33d7ef4869d469f3: Running coprocessor pre-open hook at 1731961608879Writing region info on filesystem at 1731961608879Initializing all the Stores at 1731961608880 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961608880Cleaning up temporary data from old regions at 1731961608889 (+9 ms)Running coprocessor post-open hooks at 1731961608897 (+8 ms)Region opened successfully at 1731961608898 (+1 ms) 2024-11-18T20:26:48,901 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3., pid=6, masterSystemTime=1731961608864 2024-11-18T20:26:48,905 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:26:48,905 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:26:48,906 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e8ed5b9b24af366d33d7ef4869d469f3, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,40349,1731961606341 2024-11-18T20:26:48,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8ed5b9b24af366d33d7ef4869d469f3, server=c0a89b2656d4,40349,1731961606341 because future has completed 2024-11-18T20:26:48,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T20:26:48,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e8ed5b9b24af366d33d7ef4869d469f3, server=c0a89b2656d4,40349,1731961606341 in 204 msec 2024-11-18T20:26:48,922 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T20:26:48,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=e8ed5b9b24af366d33d7ef4869d469f3, ASSIGN in 378 msec 2024-11-18T20:26:48,924 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T20:26:48,924 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961608924"}]},"ts":"1731961608924"} 2024-11-18T20:26:48,927 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-18T20:26:48,929 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T20:26:48,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 494 msec 2024-11-18T20:26:53,485 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-18T20:26:53,533 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T20:26:53,535 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-18T20:26:56,107 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:26:56,108 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T20:26:56,112 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T20:26:56,112 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T20:26:56,114 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:26:56,114 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T20:26:56,115 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T20:26:56,115 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T20:26:58,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34529 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:26:58,522 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-18T20:26:58,527 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-18T20:26:58,535 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-18T20:26:58,536 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 
2024-11-18T20:26:58,536 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.1731961618536 2024-11-18T20:26:58,546 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:26:58,546 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:26:58,546 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:26:58,546 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:26:58,546 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:26:58,547 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961607511 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961618536 2024-11-18T20:26:58,548 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42335:42335),(127.0.0.1/127.0.0.1:36715:36715)] 2024-11-18T20:26:58,548 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961607511 is not closed yet, will try archiving it next time 2024-11-18T20:26:58,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741833_1009 (size=451) 2024-11-18T20:26:58,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741833_1009 (size=451) 2024-11-18T20:26:58,553 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961607511 to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs/c0a89b2656d4%2C40349%2C1731961606341.1731961607511 2024-11-18T20:26:58,558 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3., hostname=c0a89b2656d4,40349,1731961606341, seqNum=2] 2024-11-18T20:27:10,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40349 {}] regionserver.HRegion(8855): Flush requested on e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:27:10,618 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e8ed5b9b24af366d33d7ef4869d469f3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:27:10,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/276c1474cb764f0cb40fc6b17d1f772d is 1080, key is row0001/info:/1731961618560/Put/seqid=0 2024-11-18T20:27:10,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741838_1014 (size=12509) 2024-11-18T20:27:10,684 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741838_1014 (size=12509) 2024-11-18T20:27:10,685 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/276c1474cb764f0cb40fc6b17d1f772d 2024-11-18T20:27:10,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/276c1474cb764f0cb40fc6b17d1f772d as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/276c1474cb764f0cb40fc6b17d1f772d 2024-11-18T20:27:10,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/276c1474cb764f0cb40fc6b17d1f772d, entries=7, sequenceid=11, filesize=12.2 K 2024-11-18T20:27:10,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e8ed5b9b24af366d33d7ef4869d469f3 in 135ms, sequenceid=11, compaction requested=false 2024-11-18T20:27:10,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e8ed5b9b24af366d33d7ef4869d469f3: 2024-11-18T20:27:14,909 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-18T20:27:18,638 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.1731961638638 2024-11-18T20:27:18,854 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:18,854 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:18,854 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:18,855 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:18,855 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:18,855 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:18,856 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961618536 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961638638 2024-11-18T20:27:18,857 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42335:42335),(127.0.0.1/127.0.0.1:36715:36715)] 2024-11-18T20:27:18,857 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961618536 is not closed yet, will try archiving it next time 2024-11-18T20:27:18,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741837_1013 (size=12399) 2024-11-18T20:27:18,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741837_1013 (size=12399) 2024-11-18T20:27:19,061 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:21,268 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:23,474 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:25,683 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40349 {}] regionserver.HRegion(8855): Flush requested on e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:27:25,685 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e8ed5b9b24af366d33d7ef4869d469f3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:27:25,889 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:25,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/e64338eea7d4406b961f0aa715c7c8c3 is 1080, key is row0008/info:/1731961632616/Put/seqid=0 2024-11-18T20:27:25,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741840_1016 (size=12509) 2024-11-18T20:27:25,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741840_1016 (size=12509) 2024-11-18T20:27:26,312 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/e64338eea7d4406b961f0aa715c7c8c3 2024-11-18T20:27:26,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/e64338eea7d4406b961f0aa715c7c8c3 as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/e64338eea7d4406b961f0aa715c7c8c3 2024-11-18T20:27:26,338 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/e64338eea7d4406b961f0aa715c7c8c3, entries=7, sequenceid=21, filesize=12.2 K 2024-11-18T20:27:26,542 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:26,542 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e8ed5b9b24af366d33d7ef4869d469f3 in 
858ms, sequenceid=21, compaction requested=false 2024-11-18T20:27:26,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e8ed5b9b24af366d33d7ef4869d469f3: 2024-11-18T20:27:26,543 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-18T20:27:26,544 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:27:26,546 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/276c1474cb764f0cb40fc6b17d1f772d because midkey is the same as first or last row 2024-11-18T20:27:27,892 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:28,782 INFO [master/c0a89b2656d4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T20:27:28,782 INFO [master/c0a89b2656d4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T20:27:30,103 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:30,108 WARN [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:30,110 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C40349%2C1731961606341:(num 1731961638638) roll requested 2024-11-18T20:27:30,110 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.1731961650110 2024-11-18T20:27:30,323 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:30,323 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:30,324 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:30,324 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:30,324 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:30,324 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-18T20:27:30,325 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961638638 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961650110 2024-11-18T20:27:30,326 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42335:42335),(127.0.0.1/127.0.0.1:36715:36715)] 2024-11-18T20:27:30,326 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961638638 is not closed yet, will try archiving it next time 2024-11-18T20:27:30,327 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961618536 to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs/c0a89b2656d4%2C40349%2C1731961606341.1731961618536 2024-11-18T20:27:30,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741839_1015 (size=7739) 2024-11-18T20:27:30,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741839_1015 (size=7739) 2024-11-18T20:27:32,309 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:33,879 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e8ed5b9b24af366d33d7ef4869d469f3, had cached 0 bytes from a total of 25018 2024-11-18T20:27:34,515 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:36,719 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:38,928 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:40,932 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T20:27:40,932 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.1731961660932 2024-11-18T20:27:44,909 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:27:45,949 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:45,952 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:45,952 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C40349%2C1731961606341:(num 1731961660932) roll requested 2024-11-18T20:27:45,952 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:45,952 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:45,952 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:45,953 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:45,953 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:45,953 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961650110 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961660932 2024-11-18T20:27:45,954 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42335:42335),(127.0.0.1/127.0.0.1:36715:36715)] 2024-11-18T20:27:45,954 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961650110 is not closed yet, will try archiving it next time 2024-11-18T20:27:45,955 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.1731961665954 2024-11-18T20:27:45,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741841_1017 (size=4753) 2024-11-18T20:27:45,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741841_1017 (size=4753) 2024-11-18T20:27:50,959 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:50,959 WARN [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:50,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40349 {}] regionserver.HRegion(8855): Flush requested on e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:27:50,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e8ed5b9b24af366d33d7ef4869d469f3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:27:50,972 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:50,972 WARN [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:52,961 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T20:27:55,964 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:55,964 WARN [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:27:55,964 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:55,965 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:55,965 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:55,966 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:55,966 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:27:55,967 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961660932 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961665954 2024-11-18T20:27:55,969 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42335:42335),(127.0.0.1/127.0.0.1:36715:36715)] 2024-11-18T20:27:55,969 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961660932 is not closed yet, will try archiving it next time 2024-11-18T20:27:55,969 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C40349%2C1731961606341:(num 1731961665954) roll requested 2024-11-18T20:27:55,970 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.1731961675969 2024-11-18T20:27:55,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741842_1018 (size=1569) 2024-11-18T20:27:55,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741842_1018 (size=1569) 2024-11-18T20:27:55,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/158bcd6a420b42cfba386abd2c5950ba is 1080, key is row0015/info:/1731961647688/Put/seqid=0 2024-11-18T20:27:55,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741844_1020 (size=12509) 2024-11-18T20:27:55,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741844_1020 (size=12509) 2024-11-18T20:27:55,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/158bcd6a420b42cfba386abd2c5950ba 2024-11-18T20:27:55,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/158bcd6a420b42cfba386abd2c5950ba as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/158bcd6a420b42cfba386abd2c5950ba 2024-11-18T20:27:56,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/158bcd6a420b42cfba386abd2c5950ba, entries=7, sequenceid=31, filesize=12.2 K 2024-11-18T20:28:00,987 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:28:00,987 WARN [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:28:01,004 INFO [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:28:01,005 WARN [FSHLog-0-hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094-prefix:c0a89b2656d4,40349,1731961606341 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44265,DS-665db41d-f112-4c3d-be98-9768899f1003,DISK], DatanodeInfoWithStorage[127.0.0.1:42115,DS-bb4f9828-3952-47d3-8c37-65ee8781c763,DISK]] 2024-11-18T20:28:01,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e8ed5b9b24af366d33d7ef4869d469f3 in 10045ms, sequenceid=31, compaction requested=true 2024-11-18T20:28:01,005 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e8ed5b9b24af366d33d7ef4869d469f3: 2024-11-18T20:28:01,005 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,005 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-18T20:28:01,005 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:01,006 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,006 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/276c1474cb764f0cb40fc6b17d1f772d because midkey is the same as first or last row 2024-11-18T20:28:01,006 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,006 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961665954 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961675969 2024-11-18T20:28:01,008 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:42335:42335),(127.0.0.1/127.0.0.1:36715:36715)] 2024-11-18T20:28:01,008 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961665954 is not closed yet, will try archiving it next time 2024-11-18T20:28:01,008 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961638638 to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs/c0a89b2656d4%2C40349%2C1731961606341.1731961638638 2024-11-18T20:28:01,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e8ed5b9b24af366d33d7ef4869d469f3:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:28:01,009 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C40349%2C1731961606341:(num 1731961675969) roll requested 2024-11-18T20:28:01,009 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.1731961681009 2024-11-18T20:28:01,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741843_1019 (size=438) 2024-11-18T20:28:01,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741843_1019 (size=438) 2024-11-18T20:28:01,015 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:28:01,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:28:01,015 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961650110 to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs/c0a89b2656d4%2C40349%2C1731961606341.1731961650110 2024-11-18T20:28:01,017 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961660932 to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs/c0a89b2656d4%2C40349%2C1731961606341.1731961660932 2024-11-18T20:28:01,018 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:28:01,018 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961665954 to 
hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs/c0a89b2656d4%2C40349%2C1731961606341.1731961665954 2024-11-18T20:28:01,020 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.HStore(1541): e8ed5b9b24af366d33d7ef4869d469f3/info is initiating minor compaction (all files) 2024-11-18T20:28:01,020 INFO [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e8ed5b9b24af366d33d7ef4869d469f3/info in TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:28:01,021 INFO [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/276c1474cb764f0cb40fc6b17d1f772d, hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/e64338eea7d4406b961f0aa715c7c8c3, hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/158bcd6a420b42cfba386abd2c5950ba] into tmpdir=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp, totalSize=36.6 K 2024-11-18T20:28:01,021 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,021 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,021 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,021 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,021 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,021 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961675969 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961681009 2024-11-18T20:28:01,022 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] compactions.Compactor(225): Compacting 276c1474cb764f0cb40fc6b17d1f772d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731961618560 2024-11-18T20:28:01,023 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] compactions.Compactor(225): Compacting e64338eea7d4406b961f0aa715c7c8c3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731961632616 2024-11-18T20:28:01,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741845_1021 (size=93) 2024-11-18T20:28:01,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741845_1021 (size=93) 2024-11-18T20:28:01,024 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] compactions.Compactor(225): Compacting 158bcd6a420b42cfba386abd2c5950ba, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731961647688 2024-11-18T20:28:01,024 INFO [WAL-Archive-0 {}] 
wal.AbstractFSWAL(968): Archiving hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961675969 to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs/c0a89b2656d4%2C40349%2C1731961606341.1731961675969 2024-11-18T20:28:01,033 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36715:36715),(127.0.0.1/127.0.0.1:42335:42335)] 2024-11-18T20:28:01,034 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40349%2C1731961606341.1731961681033 2024-11-18T20:28:01,042 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,042 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,042 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:01,043 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961681009 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961681033 2024-11-18T20:28:01,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741846_1022 (size=1258) 2024-11-18T20:28:01,045 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36715:36715),(127.0.0.1/127.0.0.1:42335:42335)] 2024-11-18T20:28:01,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741846_1022 (size=1258) 2024-11-18T20:28:01,045 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/WALs/c0a89b2656d4,40349,1731961606341/c0a89b2656d4%2C40349%2C1731961606341.1731961681009 is not closed yet, will try archiving it next time 2024-11-18T20:28:01,057 INFO [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e8ed5b9b24af366d33d7ef4869d469f3#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:28:01,058 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/01a70bf2f32d4261b1388069f9054667 is 1080, key is row0001/info:/1731961618560/Put/seqid=0 2024-11-18T20:28:01,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741848_1024 (size=27710) 2024-11-18T20:28:01,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741848_1024 (size=27710) 2024-11-18T20:28:01,076 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/01a70bf2f32d4261b1388069f9054667 as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/01a70bf2f32d4261b1388069f9054667 2024-11-18T20:28:01,092 INFO [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e8ed5b9b24af366d33d7ef4869d469f3/info of e8ed5b9b24af366d33d7ef4869d469f3 into 01a70bf2f32d4261b1388069f9054667(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:28:01,092 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e8ed5b9b24af366d33d7ef4869d469f3: 2024-11-18T20:28:01,094 INFO [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3., storeName=e8ed5b9b24af366d33d7ef4869d469f3/info, priority=13, startTime=1731961681008; duration=0sec 2024-11-18T20:28:01,094 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T20:28:01,094 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:01,094 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/01a70bf2f32d4261b1388069f9054667 because midkey is the same as first or last row 2024-11-18T20:28:01,095 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T20:28:01,095 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:01,095 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/01a70bf2f32d4261b1388069f9054667 because midkey is the same as first or last row 2024-11-18T20:28:01,095 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T20:28:01,095 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:01,095 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/01a70bf2f32d4261b1388069f9054667 because midkey is the same as first or last row 2024-11-18T20:28:01,095 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:28:01,095 DEBUG [RS:0;c0a89b2656d4:40349-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e8ed5b9b24af366d33d7ef4869d469f3:info 2024-11-18T20:28:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40349 {}] regionserver.HRegion(8855): Flush requested on e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:28:13,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e8ed5b9b24af366d33d7ef4869d469f3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:28:13,098 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/82685c5ae8244e1284d2e5a2d483d9c4 is 1080, key is row0022/info:/1731961681035/Put/seqid=0 2024-11-18T20:28:13,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741849_1025 (size=12509) 2024-11-18T20:28:13,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741849_1025 (size=12509) 2024-11-18T20:28:13,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/82685c5ae8244e1284d2e5a2d483d9c4 2024-11-18T20:28:13,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/82685c5ae8244e1284d2e5a2d483d9c4 as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/82685c5ae8244e1284d2e5a2d483d9c4 2024-11-18T20:28:13,125 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/82685c5ae8244e1284d2e5a2d483d9c4, entries=7, sequenceid=42, filesize=12.2 K 2024-11-18T20:28:13,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e8ed5b9b24af366d33d7ef4869d469f3 in 40ms, sequenceid=42, compaction requested=false 2024-11-18T20:28:13,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e8ed5b9b24af366d33d7ef4869d469f3: 2024-11-18T20:28:13,127 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-18T20:28:13,127 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:13,127 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/01a70bf2f32d4261b1388069f9054667 because midkey is the same as first or last row 2024-11-18T20:28:14,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:28:18,879 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e8ed5b9b24af366d33d7ef4869d469f3, had cached 0 bytes from a total of 40219 2024-11-18T20:28:21,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:28:21,109 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T20:28:21,110 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:28:21,122 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:21,123 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:21,123 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-18T20:28:21,123 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:28:21,123 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1349950594, stopped=false 2024-11-18T20:28:21,123 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c0a89b2656d4,34529,1731961605780 2024-11-18T20:28:21,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:28:21,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:21,125 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:28:21,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:28:21,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:21,125 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:28:21,125 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:28:21,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:21,125 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:28:21,125 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:28:21,125 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0a89b2656d4,40349,1731961606341' ***** 2024-11-18T20:28:21,126 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:28:21,126 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:28:21,126 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:28:21,126 INFO [RS:0;c0a89b2656d4:40349 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:28:21,126 INFO [RS:0;c0a89b2656d4:40349 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:28:21,126 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(3091): Received CLOSE for e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:28:21,127 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(959): stopping server c0a89b2656d4,40349,1731961606341 2024-11-18T20:28:21,127 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:28:21,127 INFO [RS:0;c0a89b2656d4:40349 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c0a89b2656d4:40349. 
2024-11-18T20:28:21,127 DEBUG [RS:0;c0a89b2656d4:40349 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:28:21,127 DEBUG [RS:0;c0a89b2656d4:40349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:21,127 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:28:21,127 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:28:21,127 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:28:21,127 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e8ed5b9b24af366d33d7ef4869d469f3, disabling compactions & flushes 2024-11-18T20:28:21,127 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:28:21,127 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:28:21,127 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:28:21,128 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. after waiting 0 ms 2024-11-18T20:28:21,128 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 
2024-11-18T20:28:21,128 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T20:28:21,128 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing e8ed5b9b24af366d33d7ef4869d469f3 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-18T20:28:21,128 DEBUG [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(1325): Online Regions={e8ed5b9b24af366d33d7ef4869d469f3=TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T20:28:21,128 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:28:21,128 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:28:21,128 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:28:21,128 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:28:21,128 DEBUG [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e8ed5b9b24af366d33d7ef4869d469f3 2024-11-18T20:28:21,128 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:28:21,128 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-18T20:28:21,134 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/50052ded8e954de9a725be78faadf39a is 1080, key is row0029/info:/1731961695092/Put/seqid=0 2024-11-18T20:28:21,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741850_1026 (size=8193) 2024-11-18T20:28:21,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741850_1026 (size=8193) 2024-11-18T20:28:21,142 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/50052ded8e954de9a725be78faadf39a 2024-11-18T20:28:21,152 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/.tmp/info/09a46732343a422cb95726654997c759 is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3./info:regioninfo/1731961608906/Put/seqid=0 2024-11-18T20:28:21,152 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/.tmp/info/50052ded8e954de9a725be78faadf39a as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/50052ded8e954de9a725be78faadf39a 2024-11-18T20:28:21,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741851_1027 (size=7016) 2024-11-18T20:28:21,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741851_1027 (size=7016) 2024-11-18T20:28:21,160 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/.tmp/info/09a46732343a422cb95726654997c759 2024-11-18T20:28:21,161 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/50052ded8e954de9a725be78faadf39a, entries=3, sequenceid=48, filesize=8.0 K 2024-11-18T20:28:21,163 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for e8ed5b9b24af366d33d7ef4869d469f3 in 35ms, sequenceid=48, compaction requested=true 2024-11-18T20:28:21,164 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/276c1474cb764f0cb40fc6b17d1f772d, hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/e64338eea7d4406b961f0aa715c7c8c3, hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/158bcd6a420b42cfba386abd2c5950ba] to archive 2024-11-18T20:28:21,167 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T20:28:21,171 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/276c1474cb764f0cb40fc6b17d1f772d to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/archive/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/276c1474cb764f0cb40fc6b17d1f772d 2024-11-18T20:28:21,173 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/e64338eea7d4406b961f0aa715c7c8c3 to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/archive/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/e64338eea7d4406b961f0aa715c7c8c3 2024-11-18T20:28:21,176 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/158bcd6a420b42cfba386abd2c5950ba to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/archive/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/info/158bcd6a420b42cfba386abd2c5950ba 2024-11-18T20:28:21,189 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/.tmp/ns/03595797cdee4561ac3abba5d2ae2b66 is 43, key is default/ns:d/1731961608202/Put/seqid=0 2024-11-18T20:28:21,189 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c0a89b2656d4:34529 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T20:28:21,194 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [276c1474cb764f0cb40fc6b17d1f772d=12509, e64338eea7d4406b961f0aa715c7c8c3=12509, 158bcd6a420b42cfba386abd2c5950ba=12509] 2024-11-18T20:28:21,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741852_1028 (size=5153) 2024-11-18T20:28:21,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741852_1028 (size=5153) 2024-11-18T20:28:21,196 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/.tmp/ns/03595797cdee4561ac3abba5d2ae2b66 2024-11-18T20:28:21,200 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/default/TestLogRolling-testSlowSyncLogRolling/e8ed5b9b24af366d33d7ef4869d469f3/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-18T20:28:21,202 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 2024-11-18T20:28:21,203 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e8ed5b9b24af366d33d7ef4869d469f3: Waiting for close lock at 1731961701127Running coprocessor pre-close hooks at 1731961701127Disabling compacts and flushes for region at 1731961701127Disabling writes for close at 1731961701128 (+1 ms)Obtaining lock to block concurrent updates at 1731961701128Preparing flush snapshotting stores in e8ed5b9b24af366d33d7ef4869d469f3 at 1731961701128Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731961701128Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. at 1731961701129 (+1 ms)Flushing e8ed5b9b24af366d33d7ef4869d469f3/info: creating writer at 1731961701129Flushing e8ed5b9b24af366d33d7ef4869d469f3/info: appending metadata at 1731961701134 (+5 ms)Flushing e8ed5b9b24af366d33d7ef4869d469f3/info: closing flushed file at 1731961701134Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38cd83cb: reopening flushed file at 1731961701151 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for e8ed5b9b24af366d33d7ef4869d469f3 in 35ms, sequenceid=48, compaction requested=true at 1731961701163 (+12 ms)Writing region close event to WAL at 1731961701194 (+31 ms)Running coprocessor post-close hooks at 1731961701201 (+7 ms)Closed at 1731961701202 (+1 ms) 2024-11-18T20:28:21,203 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731961608428.e8ed5b9b24af366d33d7ef4869d469f3. 
2024-11-18T20:28:21,221 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/.tmp/table/65b6476e8106400a8edd3ee631b9fa3e is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731961608924/Put/seqid=0 2024-11-18T20:28:21,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741853_1029 (size=5396) 2024-11-18T20:28:21,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741853_1029 (size=5396) 2024-11-18T20:28:21,228 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/.tmp/table/65b6476e8106400a8edd3ee631b9fa3e 2024-11-18T20:28:21,236 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/.tmp/info/09a46732343a422cb95726654997c759 as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/info/09a46732343a422cb95726654997c759 2024-11-18T20:28:21,245 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/info/09a46732343a422cb95726654997c759, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T20:28:21,246 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/.tmp/ns/03595797cdee4561ac3abba5d2ae2b66 as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/ns/03595797cdee4561ac3abba5d2ae2b66 2024-11-18T20:28:21,255 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/ns/03595797cdee4561ac3abba5d2ae2b66, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T20:28:21,256 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/.tmp/table/65b6476e8106400a8edd3ee631b9fa3e as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/table/65b6476e8106400a8edd3ee631b9fa3e 2024-11-18T20:28:21,265 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/table/65b6476e8106400a8edd3ee631b9fa3e, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T20:28:21,266 INFO 
[RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false 2024-11-18T20:28:21,272 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T20:28:21,273 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:28:21,273 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:28:21,274 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961701128Running coprocessor pre-close hooks at 1731961701128Disabling compacts and flushes for region at 1731961701128Disabling writes for close at 1731961701128Obtaining lock to block concurrent updates at 1731961701128Preparing flush snapshotting stores in 1588230740 at 1731961701128Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731961701129 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731961701130 (+1 ms)Flushing 1588230740/info: creating writer at 1731961701130Flushing 1588230740/info: appending metadata at 1731961701151 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731961701151Flushing 1588230740/ns: creating writer at 1731961701168 (+17 ms)Flushing 1588230740/ns: appending metadata at 1731961701188 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731961701188Flushing 1588230740/table: creating writer at 1731961701205 (+17 ms)Flushing 1588230740/table: appending metadata at 1731961701221 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731961701221Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ad50ecc: reopening flushed file at 1731961701235 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8678a01: reopening flushed file at 1731961701245 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bfb896e: reopening flushed file at 1731961701255 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=11, compaction requested=false at 1731961701266 (+11 ms)Writing region close event to WAL at 1731961701268 (+2 ms)Running coprocessor post-close hooks at 1731961701273 (+5 ms)Closed at 1731961701273 2024-11-18T20:28:21,274 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:28:21,328 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(976): stopping server c0a89b2656d4,40349,1731961606341; all regions closed. 
2024-11-18T20:28:21,330 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,331 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,331 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,331 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,331 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741834_1010 (size=3066) 2024-11-18T20:28:21,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741834_1010 (size=3066) 2024-11-18T20:28:21,337 DEBUG [RS:0;c0a89b2656d4:40349 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs 2024-11-18T20:28:21,338 INFO [RS:0;c0a89b2656d4:40349 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C40349%2C1731961606341.meta:.meta(num 1731961607952) 2024-11-18T20:28:21,338 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,338 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,338 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,338 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,339 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741847_1023 (size=12695) 2024-11-18T20:28:21,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741847_1023 (size=12695) 2024-11-18T20:28:21,357 INFO [regionserver/c0a89b2656d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T20:28:21,357 INFO [regionserver/c0a89b2656d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T20:28:21,360 INFO [regionserver/c0a89b2656d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:28:21,748 DEBUG [RS:0;c0a89b2656d4:40349 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/oldWALs 2024-11-18T20:28:21,748 INFO [RS:0;c0a89b2656d4:40349 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C40349%2C1731961606341:(num 1731961681033) 2024-11-18T20:28:21,748 DEBUG [RS:0;c0a89b2656d4:40349 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:21,748 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:28:21,749 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:28:21,749 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.ChoreService(370): Chore service for: regionserver/c0a89b2656d4:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:28:21,749 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:28:21,749 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:28:21,750 INFO [RS:0;c0a89b2656d4:40349 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40349 2024-11-18T20:28:21,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0a89b2656d4,40349,1731961606341 2024-11-18T20:28:21,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:28:21,754 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:28:21,755 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0a89b2656d4,40349,1731961606341] 2024-11-18T20:28:21,756 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0a89b2656d4,40349,1731961606341 already deleted, retry=false 2024-11-18T20:28:21,756 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0a89b2656d4,40349,1731961606341 expired; onlineServers=0 2024-11-18T20:28:21,756 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c0a89b2656d4,34529,1731961605780' ***** 2024-11-18T20:28:21,756 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:28:21,756 INFO [M:0;c0a89b2656d4:34529 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:28:21,756 INFO [M:0;c0a89b2656d4:34529 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:28:21,756 DEBUG [M:0;c0a89b2656d4:34529 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:28:21,757 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:28:21,757 DEBUG [M:0;c0a89b2656d4:34529 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:28:21,757 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961607215 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961607215,5,FailOnTimeoutGroup] 2024-11-18T20:28:21,757 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961607211 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961607211,5,FailOnTimeoutGroup] 2024-11-18T20:28:21,757 INFO [M:0;c0a89b2656d4:34529 {}] hbase.ChoreService(370): Chore service for: master/c0a89b2656d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:28:21,757 INFO [M:0;c0a89b2656d4:34529 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:28:21,757 DEBUG [M:0;c0a89b2656d4:34529 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:28:21,757 INFO [M:0;c0a89b2656d4:34529 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:28:21,757 INFO [M:0;c0a89b2656d4:34529 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:28:21,758 INFO [M:0;c0a89b2656d4:34529 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:28:21,758 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:28:21,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:28:21,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:21,759 DEBUG [M:0;c0a89b2656d4:34529 {}] zookeeper.ZKUtil(347): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:28:21,759 WARN [M:0;c0a89b2656d4:34529 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:28:21,760 INFO [M:0;c0a89b2656d4:34529 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/.lastflushedseqids 2024-11-18T20:28:21,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741854_1030 (size=130) 2024-11-18T20:28:21,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741854_1030 (size=130) 2024-11-18T20:28:21,772 INFO [M:0;c0a89b2656d4:34529 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:28:21,772 INFO [M:0;c0a89b2656d4:34529 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:28:21,772 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:28:21,772 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:21,772 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:21,772 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:28:21,772 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:21,773 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-18T20:28:21,799 DEBUG [M:0;c0a89b2656d4:34529 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9dfdec39df1e4b5485608b87bc1c092e is 82, key is hbase:meta,,1/info:regioninfo/1731961608027/Put/seqid=0 2024-11-18T20:28:21,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741855_1031 (size=5672) 2024-11-18T20:28:21,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741855_1031 (size=5672) 2024-11-18T20:28:21,806 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9dfdec39df1e4b5485608b87bc1c092e 2024-11-18T20:28:21,828 DEBUG [M:0;c0a89b2656d4:34529 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7238947195a242979c73a49f2b173f88 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961608931/Put/seqid=0 2024-11-18T20:28:21,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741856_1032 (size=6247) 2024-11-18T20:28:21,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741856_1032 (size=6247) 2024-11-18T20:28:21,834 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7238947195a242979c73a49f2b173f88 2024-11-18T20:28:21,840 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7238947195a242979c73a49f2b173f88 2024-11-18T20:28:21,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:28:21,856 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40349-0x100548427de0001, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:28:21,856 INFO [RS:0;c0a89b2656d4:40349 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:28:21,857 INFO [RS:0;c0a89b2656d4:40349 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0a89b2656d4,40349,1731961606341; zookeeper connection closed. 2024-11-18T20:28:21,857 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@40dc7e36 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@40dc7e36 2024-11-18T20:28:21,857 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:28:21,863 DEBUG [M:0;c0a89b2656d4:34529 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d99abccc4d4c4a05aafc871f8baa1ae3 is 69, key is c0a89b2656d4,40349,1731961606341/rs:state/1731961607267/Put/seqid=0 2024-11-18T20:28:21,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741857_1033 (size=5156) 2024-11-18T20:28:21,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741857_1033 (size=5156) 2024-11-18T20:28:21,869 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d99abccc4d4c4a05aafc871f8baa1ae3 2024-11-18T20:28:21,895 DEBUG [M:0;c0a89b2656d4:34529 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/adf91da7536d4f67b8951ce92d199dc5 is 52, key is load_balancer_on/state:d/1731961608407/Put/seqid=0 2024-11-18T20:28:21,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741858_1034 (size=5056) 2024-11-18T20:28:21,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741858_1034 (size=5056) 2024-11-18T20:28:21,902 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/adf91da7536d4f67b8951ce92d199dc5 2024-11-18T20:28:21,908 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9dfdec39df1e4b5485608b87bc1c092e as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9dfdec39df1e4b5485608b87bc1c092e 2024-11-18T20:28:21,915 INFO [M:0;c0a89b2656d4:34529 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9dfdec39df1e4b5485608b87bc1c092e, entries=8, sequenceid=59, filesize=5.5 K 2024-11-18T20:28:21,917 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7238947195a242979c73a49f2b173f88 as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7238947195a242979c73a49f2b173f88 2024-11-18T20:28:21,926 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7238947195a242979c73a49f2b173f88 2024-11-18T20:28:21,926 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7238947195a242979c73a49f2b173f88, entries=6, sequenceid=59, filesize=6.1 K 2024-11-18T20:28:21,928 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d99abccc4d4c4a05aafc871f8baa1ae3 as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d99abccc4d4c4a05aafc871f8baa1ae3 2024-11-18T20:28:21,936 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d99abccc4d4c4a05aafc871f8baa1ae3, entries=1, sequenceid=59, filesize=5.0 K 2024-11-18T20:28:21,937 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/adf91da7536d4f67b8951ce92d199dc5 as hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/adf91da7536d4f67b8951ce92d199dc5 2024-11-18T20:28:21,946 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/adf91da7536d4f67b8951ce92d199dc5, entries=1, sequenceid=59, filesize=4.9 K 2024-11-18T20:28:21,948 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, sequenceid=59, compaction requested=false 2024-11-18T20:28:21,949 INFO [M:0;c0a89b2656d4:34529 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:28:21,950 DEBUG [M:0;c0a89b2656d4:34529 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961701772Disabling compacts and flushes for region at 1731961701772Disabling writes for close at 1731961701772Obtaining lock to block concurrent updates at 1731961701773 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961701773Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731961701773Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731961701774 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961701774Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961701799 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961701799Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961701813 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961701827 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961701827Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961701841 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961701862 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961701862Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961701876 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961701895 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961701895Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25293eb1: reopening flushed file at 1731961701907 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@735982e0: reopening flushed file at 1731961701916 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@736759aa: reopening flushed file at 1731961701926 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3acb4305: reopening flushed file at 1731961701936 (+10 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, sequenceid=59, compaction requested=false at 1731961701948 (+12 ms)Writing region close event to WAL at 1731961701949 (+1 ms)Closed at 1731961701949 2024-11-18T20:28:21,951 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,951 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,951 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,951 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,951 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:21,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741830_1006 (size=27973) 2024-11-18T20:28:21,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42115 is added to blk_1073741830_1006 (size=27973) 2024-11-18T20:28:21,954 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:28:21,954 INFO [M:0;c0a89b2656d4:34529 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:28:21,954 INFO [M:0;c0a89b2656d4:34529 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34529 2024-11-18T20:28:21,955 INFO [M:0;c0a89b2656d4:34529 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:28:22,056 INFO [M:0;c0a89b2656d4:34529 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:28:22,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:28:22,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34529-0x100548427de0000, quorum=127.0.0.1:65026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:28:22,063 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55d18735{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:22,067 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:22,067 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:22,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:22,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:22,071 WARN [BP-2064152689-172.17.0.2-1731961603167 heartbeating to localhost/127.0.0.1:35383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:28:22,071 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:28:22,071 WARN [BP-2064152689-172.17.0.2-1731961603167 heartbeating to localhost/127.0.0.1:35383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2064152689-172.17.0.2-1731961603167 (Datanode Uuid 9b0ee407-939a-467a-982b-e94bef1f3c54) service to localhost/127.0.0.1:35383 2024-11-18T20:28:22,071 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:28:22,072 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/data/data3/current/BP-2064152689-172.17.0.2-1731961603167 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:22,073 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/data/data4/current/BP-2064152689-172.17.0.2-1731961603167 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:22,073 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:28:22,075 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59e63bea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:22,076 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:22,076 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:22,076 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:22,076 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:22,078 WARN [BP-2064152689-172.17.0.2-1731961603167 heartbeating to localhost/127.0.0.1:35383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:28:22,078 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:28:22,078 WARN [BP-2064152689-172.17.0.2-1731961603167 heartbeating to localhost/127.0.0.1:35383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2064152689-172.17.0.2-1731961603167 (Datanode Uuid db69b180-a8ba-441d-816b-fe4377c894e2) service to localhost/127.0.0.1:35383 2024-11-18T20:28:22,078 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:28:22,078 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/data/data1/current/BP-2064152689-172.17.0.2-1731961603167 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:22,079 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/cluster_47b9219f-2b6e-93a3-8baa-0a980b128d56/data/data2/current/BP-2064152689-172.17.0.2-1731961603167 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:22,079 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:28:22,091 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:28:22,091 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:22,091 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:22,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:22,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:22,099 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:28:22,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:28:22,139 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/c0a89b2656d4:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/c0a89b2656d4:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:35383 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:35383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/c0a89b2656d4:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:35383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@11446de8 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:35383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=68 (was 281), ProcessCount=11 (was 11), AvailableMemoryMB=3110 (was 3680) 2024-11-18T20:28:22,145 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=68, ProcessCount=11, AvailableMemoryMB=3109 2024-11-18T20:28:22,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:28:22,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.log.dir so I do NOT create it in target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54 2024-11-18T20:28:22,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6f4fc531-abfd-97a8-916d-3b0117575edc/hadoop.tmp.dir so I do NOT create it in target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54 2024-11-18T20:28:22,146 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194, deleteOnExit=true 2024-11-18T20:28:22,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:28:22,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/test.cache.data in system properties and HBase conf 2024-11-18T20:28:22,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:28:22,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:28:22,146 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:28:22,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:28:22,146 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:28:22,146 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T20:28:22,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:28:22,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:28:22,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:28:22,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:28:22,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:28:22,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:28:22,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:28:22,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:28:22,147 
INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:28:22,147 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:28:22,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:28:22,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:28:22,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:28:22,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:28:22,163 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:28:22,217 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:22,223 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:22,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:22,224 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:22,224 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:28:22,224 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:22,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ceaf06d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:22,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15baacc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:22,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1e033196{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/java.io.tmpdir/jetty-localhost-36777-hadoop-hdfs-3_4_1-tests_jar-_-any-18260352285130603495/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:28:22,328 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3bde2993{HTTP/1.1, (http/1.1)}{localhost:36777} 2024-11-18T20:28:22,328 INFO [Time-limited test {}] server.Server(415): Started @101023ms 2024-11-18T20:28:22,341 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:28:22,388 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:22,392 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:22,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:22,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:22,393 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:28:22,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24bc4d6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:22,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@304195bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:22,491 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@383a5779{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/java.io.tmpdir/jetty-localhost-35137-hadoop-hdfs-3_4_1-tests_jar-_-any-15199090518911564555/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:22,492 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58220e1e{HTTP/1.1, (http/1.1)}{localhost:35137} 2024-11-18T20:28:22,492 INFO [Time-limited test {}] server.Server(415): Started @101187ms 2024-11-18T20:28:22,494 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:28:22,530 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:22,534 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:22,535 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:22,535 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:22,535 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:28:22,536 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69c2cb49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:22,536 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66361119{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:22,558 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/data/data2/current/BP-2016463703-172.17.0.2-1731961702178/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:22,558 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/data/data1/current/BP-2016463703-172.17.0.2-1731961702178/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:22,574 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:28:22,577 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf789db6a452dba3 with lease ID 0x64295e4c3d0c5da1: Processing first storage report for DS-86c3bf9e-4645-4f34-a858-5120d19accad from datanode DatanodeRegistration(127.0.0.1:38061, datanodeUuid=cef3fb04-c061-40ba-9322-4f2d86d1e64e, infoPort=46683, infoSecurePort=0, ipcPort=42995, storageInfo=lv=-57;cid=testClusterID;nsid=534313738;c=1731961702178) 2024-11-18T20:28:22,577 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf789db6a452dba3 with lease ID 0x64295e4c3d0c5da1: from storage DS-86c3bf9e-4645-4f34-a858-5120d19accad node DatanodeRegistration(127.0.0.1:38061, datanodeUuid=cef3fb04-c061-40ba-9322-4f2d86d1e64e, infoPort=46683, infoSecurePort=0, ipcPort=42995, storageInfo=lv=-57;cid=testClusterID;nsid=534313738;c=1731961702178), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:22,577 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf789db6a452dba3 with lease ID 0x64295e4c3d0c5da1: Processing first storage report for DS-28d449f7-074b-49fe-a274-4e6d8c9c19bf from datanode DatanodeRegistration(127.0.0.1:38061, datanodeUuid=cef3fb04-c061-40ba-9322-4f2d86d1e64e, infoPort=46683, infoSecurePort=0, ipcPort=42995, storageInfo=lv=-57;cid=testClusterID;nsid=534313738;c=1731961702178) 2024-11-18T20:28:22,577 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf789db6a452dba3 with lease ID 0x64295e4c3d0c5da1: from storage DS-28d449f7-074b-49fe-a274-4e6d8c9c19bf node DatanodeRegistration(127.0.0.1:38061, datanodeUuid=cef3fb04-c061-40ba-9322-4f2d86d1e64e, infoPort=46683, infoSecurePort=0, ipcPort=42995, storageInfo=lv=-57;cid=testClusterID;nsid=534313738;c=1731961702178), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:22,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68317950{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/java.io.tmpdir/jetty-localhost-39075-hadoop-hdfs-3_4_1-tests_jar-_-any-462098975118510089/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:22,634 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@587d64cb{HTTP/1.1, (http/1.1)}{localhost:39075} 2024-11-18T20:28:22,634 INFO [Time-limited test {}] server.Server(415): Started @101329ms 2024-11-18T20:28:22,636 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
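The startup sequence above (test-data system properties, NameNode/DataNode Jetty UIs, first block reports) is what a single HBaseTestingUtil-driven mini cluster produces. A minimal sketch of a test harness that triggers this sequence is below; the lifecycle method names (startMiniCluster, shutdownMiniCluster, getConfiguration) follow the long-standing HBaseTestingUtility API and are assumed here to be unchanged on branch-3's HBaseTestingUtil.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    /*
     * Minimal sketch, assuming HBaseTestingUtil keeps the classic
     * HBaseTestingUtility lifecycle methods. startMiniCluster() is what
     * brings up the mini DFS cluster, the Jetty web UIs and the
     * ZooKeeper/master/regionserver processes seen in this log.
     */
    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        Configuration conf = util.getConfiguration(); // same conf the log shows being populated
        util.startMiniCluster();                      // HDFS + ZooKeeper + 1 master + 1 regionserver
        try {
          // ... test body would go here ...
        } finally {
          util.shutdownMiniCluster();                 // tears the whole mini cluster down again
        }
      }
    }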
2024-11-18T20:28:22,698 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/data/data3/current/BP-2016463703-172.17.0.2-1731961702178/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:22,698 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/data/data4/current/BP-2016463703-172.17.0.2-1731961702178/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:22,715 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:28:22,718 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3fb36ae20278218 with lease ID 0x64295e4c3d0c5da2: Processing first storage report for DS-a23f2c26-dd5a-4d26-8074-737c2467e607 from datanode DatanodeRegistration(127.0.0.1:44809, datanodeUuid=8afcdf2f-54f3-427d-8328-c315aec4ef3d, infoPort=44661, infoSecurePort=0, ipcPort=42547, storageInfo=lv=-57;cid=testClusterID;nsid=534313738;c=1731961702178) 2024-11-18T20:28:22,718 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3fb36ae20278218 with lease ID 0x64295e4c3d0c5da2: from storage DS-a23f2c26-dd5a-4d26-8074-737c2467e607 node DatanodeRegistration(127.0.0.1:44809, datanodeUuid=8afcdf2f-54f3-427d-8328-c315aec4ef3d, infoPort=44661, infoSecurePort=0, ipcPort=42547, storageInfo=lv=-57;cid=testClusterID;nsid=534313738;c=1731961702178), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:22,718 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3fb36ae20278218 with lease ID 0x64295e4c3d0c5da2: Processing first storage report for DS-bb0405eb-7a24-47b2-8522-653d149b9920 from datanode DatanodeRegistration(127.0.0.1:44809, datanodeUuid=8afcdf2f-54f3-427d-8328-c315aec4ef3d, infoPort=44661, infoSecurePort=0, ipcPort=42547, storageInfo=lv=-57;cid=testClusterID;nsid=534313738;c=1731961702178) 2024-11-18T20:28:22,718 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3fb36ae20278218 with lease ID 0x64295e4c3d0c5da2: from storage DS-bb0405eb-7a24-47b2-8522-653d149b9920 node DatanodeRegistration(127.0.0.1:44809, datanodeUuid=8afcdf2f-54f3-427d-8328-c315aec4ef3d, infoPort=44661, infoSecurePort=0, ipcPort=42547, storageInfo=lv=-57;cid=testClusterID;nsid=534313738;c=1731961702178), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:22,763 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54 2024-11-18T20:28:22,768 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/zookeeper_0, clientPort=53039, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:28:22,769 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53039 2024-11-18T20:28:22,770 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:22,773 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:22,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:28:22,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:28:22,789 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7 with version=8 2024-11-18T20:28:22,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/hbase-staging 2024-11-18T20:28:22,792 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:28:22,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:22,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:22,792 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:28:22,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:22,793 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:28:22,793 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:28:22,793 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:28:22,794 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40277 2024-11-18T20:28:22,796 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40277 connecting to ZooKeeper ensemble=127.0.0.1:53039 2024-11-18T20:28:22,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:402770x0, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:28:22,800 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40277-0x1005485a5dd0000 connected 2024-11-18T20:28:22,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:22,816 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:22,819 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:28:22,819 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7, hbase.cluster.distributed=false 2024-11-18T20:28:22,821 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:28:22,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40277 2024-11-18T20:28:22,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40277 2024-11-18T20:28:22,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40277 2024-11-18T20:28:22,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40277 2024-11-18T20:28:22,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40277 2024-11-18T20:28:22,842 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:28:22,842 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:22,842 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:22,842 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:28:22,842 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:22,842 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:28:22,842 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:28:22,842 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:28:22,843 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34301 2024-11-18T20:28:22,845 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34301 connecting to ZooKeeper ensemble=127.0.0.1:53039 2024-11-18T20:28:22,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:22,847 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:22,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343010x0, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:28:22,851 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:343010x0, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:28:22,851 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34301-0x1005485a5dd0001 connected 2024-11-18T20:28:22,851 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:28:22,852 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:28:22,853 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:28:22,854 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:28:22,854 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34301 2024-11-18T20:28:22,854 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34301 2024-11-18T20:28:22,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34301 2024-11-18T20:28:22,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34301 2024-11-18T20:28:22,855 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34301 2024-11-18T20:28:22,867 
DEBUG [M:0;c0a89b2656d4:40277 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c0a89b2656d4:40277 2024-11-18T20:28:22,867 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c0a89b2656d4,40277,1731961702792 2024-11-18T20:28:22,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:28:22,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:28:22,869 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c0a89b2656d4,40277,1731961702792 2024-11-18T20:28:22,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:22,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:28:22,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:22,870 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:28:22,871 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c0a89b2656d4,40277,1731961702792 from backup master directory 2024-11-18T20:28:22,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:28:22,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c0a89b2656d4,40277,1731961702792 2024-11-18T20:28:22,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:28:22,872 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
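The "Instantiated ... RpcExecutor" lines above reflect the RPC call-queue settings in the test configuration. A hedged sketch of the standard hbase-site.xml keys that shape that layout follows; the values are assumptions picked only to match the logged numbers, and the read/write split shown for priority.RWQ.Fifo is governed by separate ratio settings not reproduced here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    /*
     * Sketch of the knobs behind the executor shape logged above
     * (handlerCount=3, numCallQueues=1, maxQueueLength=30).
     */
    public class RpcQueueConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // handlerCount=3 in the log comes from the handler pool size
        conf.setInt("hbase.regionserver.handler.count", 3);
        // a factor of 0 collapses the handlers onto a single call queue (numCallQueues=1 above)
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.0f);
        // maxQueueLength=30 in the log is the default of 10 x handler.count;
        // "hbase.ipc.server.max.callqueue.length" overrides it explicitly
        return conf;
      }
    }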
2024-11-18T20:28:22,872 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c0a89b2656d4,40277,1731961702792 2024-11-18T20:28:22,880 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/hbase.id] with ID: 09819974-aa2d-490c-a6c8-b8b5df2031d0 2024-11-18T20:28:22,881 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/.tmp/hbase.id 2024-11-18T20:28:22,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:28:22,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:28:22,890 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/.tmp/hbase.id]:[hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/hbase.id] 2024-11-18T20:28:22,906 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:22,906 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:28:22,908 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
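The Create/Write/Move cluster ID lines above describe a write-to-temp-then-rename sequence for hbase.id. A minimal sketch of that pattern against the Hadoop FileSystem API is shown below; the class name and paths are hypothetical, not the test directories in the log. The final rename is what keeps a concurrent reader from ever seeing a half-written cluster ID file.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /* Sketch of the temp-write-then-rename pattern the FSUtils lines describe. */
    public class ClusterIdWriteSketch {
      public static void writeClusterId(Configuration conf, String clusterId) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/hbase/.tmp/hbase.id");   // hypothetical root dir, not the one in the log
        Path target = new Path("/hbase/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8)); // write the id under .tmp first
        }
        if (!fs.rename(tmp, target)) {                 // then move it into place in one step
          throw new IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }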
2024-11-18T20:28:22,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:22,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:22,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:28:22,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:28:22,920 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:28:22,921 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:28:22,921 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:28:22,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:28:22,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:28:22,932 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store 2024-11-18T20:28:22,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:28:22,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:28:22,940 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:22,940 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:28:22,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:22,940 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:22,940 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:28:22,940 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:22,941 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
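The master:store descriptor logged above (an in-memory 'info' family with 3 versions, ROW_INDEX_V1 encoding, ROWCOL blooms and 8 KB blocks, plus plain 'proc'/'rs'/'state' families) can be expressed with the public builder API. The sketch below covers two of the families for illustration only; MasterRegion assembles its own descriptor internally.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    /* Illustrative sketch mirroring the logged column-family settings. */
    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8 KB
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc")) // defaults, as in the log
            .build();
      }
    }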
2024-11-18T20:28:22,941 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961702940Disabling compacts and flushes for region at 1731961702940Disabling writes for close at 1731961702940Writing region close event to WAL at 1731961702941 (+1 ms)Closed at 1731961702941 2024-11-18T20:28:22,942 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/.initializing 2024-11-18T20:28:22,942 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/WALs/c0a89b2656d4,40277,1731961702792 2024-11-18T20:28:22,945 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C40277%2C1731961702792, suffix=, logDir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/WALs/c0a89b2656d4,40277,1731961702792, archiveDir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/oldWALs, maxLogs=10 2024-11-18T20:28:22,946 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40277%2C1731961702792.1731961702945 2024-11-18T20:28:22,951 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/WALs/c0a89b2656d4,40277,1731961702792/c0a89b2656d4%2C40277%2C1731961702792.1731961702945 2024-11-18T20:28:22,956 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44661:44661),(127.0.0.1/127.0.0.1:46683:46683)] 2024-11-18T20:28:22,956 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:28:22,956 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:22,957 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,957 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,960 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:28:22,960 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:22,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:22,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:28:22,963 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:22,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:28:22,963 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:28:22,966 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:22,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:28:22,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:28:22,968 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:22,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:28:22,969 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,970 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,970 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,972 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,972 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,972 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:28:22,974 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:22,976 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:28:22,976 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805821, jitterRate=0.024655476212501526}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:28:22,977 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961702957Initializing all the Stores at 1731961702958 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961702958Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961702958Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961702958Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961702958Cleaning up temporary data from old regions at 1731961702972 (+14 ms)Region opened successfully at 1731961702977 (+5 ms) 2024-11-18T20:28:22,978 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:28:22,982 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48938949, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:28:22,983 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:28:22,983 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:28:22,983 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:28:22,983 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:28:22,984 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:28:22,984 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:28:22,984 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:28:22,987 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:28:22,988 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:28:22,989 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:28:22,989 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:28:22,990 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:28:22,991 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:28:22,991 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:28:22,992 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:28:22,993 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:28:22,994 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:28:22,995 DEBUG 
[master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:28:22,997 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:28:22,998 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:28:22,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:28:22,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:28:22,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:22,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:22,999 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c0a89b2656d4,40277,1731961702792, sessionid=0x1005485a5dd0000, setting cluster-up flag (Was=false) 2024-11-18T20:28:23,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:23,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:23,004 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:28:23,005 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,40277,1731961702792 2024-11-18T20:28:23,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:23,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:23,011 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:28:23,012 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,40277,1731961702792 2024-11-18T20:28:23,014 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:28:23,016 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:28:23,017 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:28:23,017 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:28:23,017 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c0a89b2656d4,40277,1731961702792 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:28:23,019 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:28:23,019 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:28:23,019 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:28:23,020 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:28:23,020 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c0a89b2656d4:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:28:23,020 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,020 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:28:23,020 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:28:23,024 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961733024 2024-11-18T20:28:23,024 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:28:23,024 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:28:23,024 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:28:23,024 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:28:23,024 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:28:23,024 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:28:23,025 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,025 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:28:23,025 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:28:23,025 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:28:23,025 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:28:23,025 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:28:23,026 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:23,026 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:28:23,027 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:28:23,027 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:28:23,028 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961703027,5,FailOnTimeoutGroup] 2024-11-18T20:28:23,028 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961703028,5,FailOnTimeoutGroup] 2024-11-18T20:28:23,028 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,028 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:28:23,028 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,028 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T20:28:23,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:28:23,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:28:23,035 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:28:23,035 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7 2024-11-18T20:28:23,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:28:23,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:28:23,045 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:23,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:28:23,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:28:23,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:23,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:23,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:28:23,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:28:23,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:23,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:23,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:28:23,053 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:28:23,053 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:23,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:23,054 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:28:23,055 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:28:23,055 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:23,056 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:23,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:28:23,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740 2024-11-18T20:28:23,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740 2024-11-18T20:28:23,058 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(746): ClusterId : 09819974-aa2d-490c-a6c8-b8b5df2031d0 2024-11-18T20:28:23,058 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:28:23,059 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:28:23,059 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:28:23,060 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T20:28:23,060 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:28:23,060 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:28:23,061 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:28:23,062 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:28:23,063 DEBUG [RS:0;c0a89b2656d4:34301 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@765e5e72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:28:23,064 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:28:23,064 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792689, jitterRate=0.007956624031066895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:28:23,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961703045Initializing all the Stores at 1731961703046 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961703046Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961703047 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961703047Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961703047Cleaning up temporary data from old regions at 1731961703059 (+12 ms)Region opened successfully at 1731961703065 (+6 ms) 2024-11-18T20:28:23,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:28:23,066 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region 
hbase:meta,,1.1588230740 2024-11-18T20:28:23,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:28:23,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:28:23,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:28:23,070 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:28:23,070 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961703066Disabling compacts and flushes for region at 1731961703066Disabling writes for close at 1731961703066Writing region close event to WAL at 1731961703070 (+4 ms)Closed at 1731961703070 2024-11-18T20:28:23,072 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:28:23,072 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:28:23,072 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:28:23,073 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:28:23,075 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:28:23,081 DEBUG [RS:0;c0a89b2656d4:34301 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c0a89b2656d4:34301 2024-11-18T20:28:23,081 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:28:23,081 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:28:23,081 DEBUG [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T20:28:23,082 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0a89b2656d4,40277,1731961702792 with port=34301, startcode=1731961702842 2024-11-18T20:28:23,082 DEBUG [RS:0;c0a89b2656d4:34301 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:28:23,085 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36505, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:28:23,086 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40277 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,086 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40277 {}] master.ServerManager(517): Registering regionserver=c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,088 DEBUG [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7 2024-11-18T20:28:23,089 DEBUG [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33599 2024-11-18T20:28:23,089 DEBUG [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:28:23,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:28:23,091 DEBUG [RS:0;c0a89b2656d4:34301 {}] zookeeper.ZKUtil(111): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,091 WARN [RS:0;c0a89b2656d4:34301 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:28:23,091 INFO [RS:0;c0a89b2656d4:34301 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:28:23,091 DEBUG [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/WALs/c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,094 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0a89b2656d4,34301,1731961702842] 2024-11-18T20:28:23,097 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:28:23,099 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:28:23,099 INFO [RS:0;c0a89b2656d4:34301 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:28:23,099 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T20:28:23,099 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:28:23,100 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:28:23,101 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,101 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,102 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:23,102 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:28:23,102 DEBUG [RS:0;c0a89b2656d4:34301 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:28:23,105 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T20:28:23,105 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,105 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,105 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,105 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,105 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34301,1731961702842-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:28:23,119 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:28:23,119 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34301,1731961702842-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,119 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,119 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.Replication(171): c0a89b2656d4,34301,1731961702842 started 2024-11-18T20:28:23,132 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,133 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(1482): Serving as c0a89b2656d4,34301,1731961702842, RpcServer on c0a89b2656d4/172.17.0.2:34301, sessionid=0x1005485a5dd0001 2024-11-18T20:28:23,133 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:28:23,133 DEBUG [RS:0;c0a89b2656d4:34301 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,133 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,34301,1731961702842' 2024-11-18T20:28:23,133 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:28:23,134 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:28:23,134 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:28:23,134 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:28:23,134 DEBUG [RS:0;c0a89b2656d4:34301 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,134 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,34301,1731961702842' 2024-11-18T20:28:23,134 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:28:23,135 DEBUG 
[RS:0;c0a89b2656d4:34301 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:28:23,135 DEBUG [RS:0;c0a89b2656d4:34301 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:28:23,135 INFO [RS:0;c0a89b2656d4:34301 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:28:23,136 INFO [RS:0;c0a89b2656d4:34301 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:28:23,225 WARN [c0a89b2656d4:40277 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:28:23,240 INFO [RS:0;c0a89b2656d4:34301 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C34301%2C1731961702842, suffix=, logDir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/WALs/c0a89b2656d4,34301,1731961702842, archiveDir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/oldWALs, maxLogs=32 2024-11-18T20:28:23,246 INFO [RS:0;c0a89b2656d4:34301 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C34301%2C1731961702842.1731961703245 2024-11-18T20:28:23,252 INFO [RS:0;c0a89b2656d4:34301 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/WALs/c0a89b2656d4,34301,1731961702842/c0a89b2656d4%2C34301%2C1731961702842.1731961703245 2024-11-18T20:28:23,253 DEBUG [RS:0;c0a89b2656d4:34301 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44661:44661),(127.0.0.1/127.0.0.1:46683:46683)] 2024-11-18T20:28:23,476 DEBUG [c0a89b2656d4:40277 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:28:23,477 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,481 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,34301,1731961702842, state=OPENING 2024-11-18T20:28:23,483 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:28:23,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:23,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:23,487 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:28:23,487 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:28:23,487 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:28:23,487 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,34301,1731961702842}] 2024-11-18T20:28:23,643 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:28:23,650 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54195, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:28:23,657 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:28:23,657 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:28:23,659 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C34301%2C1731961702842.meta, suffix=.meta, logDir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/WALs/c0a89b2656d4,34301,1731961702842, archiveDir=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/oldWALs, maxLogs=32 2024-11-18T20:28:23,661 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C34301%2C1731961702842.meta.1731961703661.meta 2024-11-18T20:28:23,667 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/WALs/c0a89b2656d4,34301,1731961702842/c0a89b2656d4%2C34301%2C1731961702842.meta.1731961703661.meta 2024-11-18T20:28:23,668 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46683:46683),(127.0.0.1/127.0.0.1:44661:44661)] 2024-11-18T20:28:23,669 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:28:23,669 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:28:23,669 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:28:23,670 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T20:28:23,670 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:28:23,670 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:23,670 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:28:23,670 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:28:23,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:28:23,673 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:28:23,673 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:23,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:23,674 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:28:23,674 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:28:23,674 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:23,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:23,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:28:23,676 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:28:23,676 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:23,676 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:23,677 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:28:23,678 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:28:23,678 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:23,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T20:28:23,678 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:28:23,679 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740 2024-11-18T20:28:23,681 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740 2024-11-18T20:28:23,682 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:28:23,682 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:28:23,683 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:28:23,684 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:28:23,685 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=738196, jitterRate=-0.06133611500263214}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:28:23,685 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:28:23,686 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961703670Writing region info on filesystem at 1731961703670Initializing all the Stores at 1731961703671 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961703671Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961703672 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961703672Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961703672Cleaning up temporary data from old regions at 1731961703682 (+10 ms)Running coprocessor post-open hooks at 1731961703685 (+3 ms)Region opened successfully at 1731961703686 (+1 ms) 2024-11-18T20:28:23,687 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961703642 2024-11-18T20:28:23,690 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:28:23,690 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:28:23,691 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,692 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,34301,1731961702842, state=OPEN 2024-11-18T20:28:23,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:28:23,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:28:23,695 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,695 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:28:23,695 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:28:23,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:28:23,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,34301,1731961702842 in 208 msec 2024-11-18T20:28:23,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:28:23,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 626 msec 2024-11-18T20:28:23,702 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:28:23,702 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:28:23,704 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:28:23,704 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,34301,1731961702842, seqNum=-1] 2024-11-18T20:28:23,704 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:28:23,706 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49279, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:28:23,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 697 msec 2024-11-18T20:28:23,713 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961703713, completionTime=-1 2024-11-18T20:28:23,713 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:28:23,713 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:28:23,716 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:28:23,716 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961763716 2024-11-18T20:28:23,716 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961823716 2024-11-18T20:28:23,716 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T20:28:23,716 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40277,1731961702792-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,716 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40277,1731961702792-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,716 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40277,1731961702792-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,716 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c0a89b2656d4:40277, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:28:23,716 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,717 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:23,719 DEBUG [master/c0a89b2656d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:28:23,721 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.849sec 2024-11-18T20:28:23,721 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:28:23,721 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:28:23,722 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:28:23,722 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:28:23,722 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:28:23,722 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40277,1731961702792-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:28:23,722 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40277,1731961702792-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:28:23,724 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:28:23,724 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:28:23,724 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40277,1731961702792-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
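The ChoreService entries above register ScheduledChores (BalancerChore, CatalogJanitor, HbckChore, and so on) with a name, period and time unit. A rough sketch of how a chore is defined and scheduled, assuming the ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore as used inside HBase, with the period defaulting to milliseconds (the chore name and period below are invented for illustration):

  import org.apache.hadoop.hbase.ChoreService;
  import org.apache.hadoop.hbase.ScheduledChore;
  import org.apache.hadoop.hbase.Stoppable;

  final class ChoreSketch {
    static void scheduleExampleChore(Stoppable stopper) {
      ChoreService choreService = new ChoreService("example-chore-service");
      // Period assumed to be in milliseconds, matching the "unit=MILLISECONDS" entries above.
      ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60000) {
        @Override
        protected void chore() {
          // periodic work goes here
        }
      };
      choreService.scheduleChore(chore);
    }
  }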
2024-11-18T20:28:23,758 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b9cb8ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:28:23,758 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c0a89b2656d4,40277,-1 for getting cluster id 2024-11-18T20:28:23,758 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:28:23,760 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '09819974-aa2d-490c-a6c8-b8b5df2031d0' 2024-11-18T20:28:23,760 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:28:23,760 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "09819974-aa2d-490c-a6c8-b8b5df2031d0" 2024-11-18T20:28:23,761 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@300aa63e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:28:23,761 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c0a89b2656d4,40277,-1] 2024-11-18T20:28:23,761 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:28:23,761 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:23,763 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54512, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:28:23,764 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ea4d062, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:28:23,764 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:28:23,766 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,34301,1731961702842, seqNum=-1] 2024-11-18T20:28:23,767 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:28:23,769 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35492, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:28:23,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c0a89b2656d4,40277,1731961702792 2024-11-18T20:28:23,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:23,776 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:28:23,776 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:28:23,776 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:28:23,776 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:28:23,776 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:23,777 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:23,777 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T20:28:23,777 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:28:23,777 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1891077224, stopped=false 2024-11-18T20:28:23,777 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c0a89b2656d4,40277,1731961702792 2024-11-18T20:28:23,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:28:23,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:28:23,778 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:28:23,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:23,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:23,779 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
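The call stack above shows the shutdown being driven from AbstractTestLogRolling.tearDown via HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that JUnit 4 teardown pattern, assuming the testing utility is constructed with its no-arg constructor and started elsewhere in the test class:

  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.junit.After;

  public class TearDownSketch {
    // Assumed to be created here and started in a @Before/@BeforeClass method (not shown).
    protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

    @After
    public void tearDown() throws Exception {
      // Closes the shared connection and stops HBase, DFS and ZooKeeper,
      // producing the "Shutting down minicluster" sequence logged above.
      TEST_UTIL.shutdownMiniCluster();
    }
  }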
2024-11-18T20:28:23,779 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:28:23,779 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:23,779 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:28:23,779 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:28:23,779 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0a89b2656d4,34301,1731961702842' ***** 2024-11-18T20:28:23,779 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:28:23,779 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:28:23,779 INFO [RS:0;c0a89b2656d4:34301 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:28:23,779 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:28:23,780 INFO [RS:0;c0a89b2656d4:34301 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:28:23,780 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(959): stopping server c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,780 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:28:23,780 INFO [RS:0;c0a89b2656d4:34301 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c0a89b2656d4:34301. 2024-11-18T20:28:23,780 DEBUG [RS:0;c0a89b2656d4:34301 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:28:23,780 DEBUG [RS:0;c0a89b2656d4:34301 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:23,780 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
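Both the test client (earlier entries) and the region server here close an AsyncConnectionImpl, which logs the closing call stack at DEBUG. In client code such a connection is typically obtained from ConnectionFactory and closed with try-with-resources; a hedged sketch of that usage:

  import java.util.concurrent.CompletableFuture;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.AsyncConnection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  final class AsyncConnectionSketch {
    static void useAndClose() throws Exception {
      Configuration conf = HBaseConfiguration.create();
      CompletableFuture<AsyncConnection> future = ConnectionFactory.createAsyncConnection(conf);
      // try-with-resources ends with AsyncConnectionImpl.close(), the same path whose
      // stack trace is logged above when the test and the region server shut down.
      try (AsyncConnection conn = future.get()) {
        // use conn.getTable(...), conn.getAdmin(), etc.
      }
    }
  }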
2024-11-18T20:28:23,780 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:28:23,780 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:28:23,780 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:28:23,781 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T20:28:23,781 DEBUG [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T20:28:23,781 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:28:23,781 DEBUG [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T20:28:23,781 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:28:23,781 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:28:23,781 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:28:23,781 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:28:23,781 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T20:28:23,798 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740/.tmp/ns/750e7c2b672f4e0e92f498b8e525f764 is 43, key is default/ns:d/1731961703707/Put/seqid=0 2024-11-18T20:28:23,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741835_1011 (size=5153) 2024-11-18T20:28:23,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741835_1011 (size=5153) 2024-11-18T20:28:23,804 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740/.tmp/ns/750e7c2b672f4e0e92f498b8e525f764 2024-11-18T20:28:23,813 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740/.tmp/ns/750e7c2b672f4e0e92f498b8e525f764 as hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740/ns/750e7c2b672f4e0e92f498b8e525f764 2024-11-18T20:28:23,823 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740/ns/750e7c2b672f4e0e92f498b8e525f764, entries=2, sequenceid=6, filesize=5.0 K 2024-11-18T20:28:23,824 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false 2024-11-18T20:28:23,824 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T20:28:23,831 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-18T20:28:23,831 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:28:23,832 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:28:23,832 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961703781Running coprocessor pre-close hooks at 1731961703781Disabling compacts and flushes for region at 1731961703781Disabling writes for close at 1731961703781Obtaining lock to block concurrent updates at 1731961703781Preparing flush snapshotting stores in 1588230740 at 1731961703781Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731961703781Flushing stores of hbase:meta,,1.1588230740 at 1731961703782 (+1 ms)Flushing 1588230740/ns: creating writer at 1731961703782Flushing 1588230740/ns: appending metadata at 1731961703797 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731961703797Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ba4bbf9: reopening flushed file at 1731961703812 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 43ms, sequenceid=6, compaction requested=false at 1731961703824 (+12 ms)Writing region close event to WAL at 1731961703826 (+2 ms)Running coprocessor post-close hooks at 1731961703831 (+5 ms)Closed at 1731961703832 (+1 ms) 2024-11-18T20:28:23,832 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:28:23,981 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(976): stopping server c0a89b2656d4,34301,1731961702842; all regions closed. 
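The entries above show the meta region flushing its ns family (74 B, 2 cells) to a temporary HFile and committing it out of .tmp before the region closes. That close-time flush is automatic, but a flush can also be requested from client code through the Admin API; a minimal sketch, assuming an existing Connection conn:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;

  final class FlushSketch {
    // Requests a memstore flush of hbase:meta, comparable to the close-time flush
    // recorded above (which HBase performs on its own when closing the region).
    static void flushMeta(Connection conn) throws Exception {
      try (Admin admin = conn.getAdmin()) {
        admin.flush(TableName.META_TABLE_NAME);
      }
    }
  }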
2024-11-18T20:28:23,982 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,982 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,982 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,982 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,982 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741834_1010 (size=1152) 2024-11-18T20:28:23,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741834_1010 (size=1152) 2024-11-18T20:28:23,988 DEBUG [RS:0;c0a89b2656d4:34301 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/oldWALs 2024-11-18T20:28:23,988 INFO [RS:0;c0a89b2656d4:34301 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C34301%2C1731961702842.meta:.meta(num 1731961703661) 2024-11-18T20:28:23,988 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,988 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,989 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,989 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,989 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:23,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741833_1009 (size=93) 2024-11-18T20:28:23,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741833_1009 (size=93) 2024-11-18T20:28:23,994 DEBUG [RS:0;c0a89b2656d4:34301 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/oldWALs 2024-11-18T20:28:23,994 INFO [RS:0;c0a89b2656d4:34301 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C34301%2C1731961702842:(num 1731961703245) 2024-11-18T20:28:23,994 DEBUG [RS:0;c0a89b2656d4:34301 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:23,994 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:28:23,994 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:28:23,995 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.ChoreService(370): Chore service for: regionserver/c0a89b2656d4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T20:28:23,995 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:28:23,995 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
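The AbstractFSWAL entries above show each WAL being closed and its file archived to the oldWALs directory once its edits are persisted. The test class in play here (TestLogRolling) exercises WAL rolling; from client code a roll can be requested per region server, assuming the Admin#getRegionServers and Admin#rollWALWriter methods of recent client APIs:

  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;

  final class WalRollSketch {
    // Asks each region server to roll its WAL; fully flushed old WAL files then become
    // eligible for archival to oldWALs, as logged above during shutdown.
    static void rollAllWals(Connection conn) throws Exception {
      try (Admin admin = conn.getAdmin()) {
        for (ServerName sn : admin.getRegionServers()) {
          admin.rollWALWriter(sn);
        }
      }
    }
  }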
2024-11-18T20:28:23,995 INFO [RS:0;c0a89b2656d4:34301 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34301 2024-11-18T20:28:23,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:28:23,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0a89b2656d4,34301,1731961702842 2024-11-18T20:28:23,996 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:28:23,997 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0a89b2656d4,34301,1731961702842] 2024-11-18T20:28:23,998 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0a89b2656d4,34301,1731961702842 already deleted, retry=false 2024-11-18T20:28:23,998 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0a89b2656d4,34301,1731961702842 expired; onlineServers=0 2024-11-18T20:28:23,998 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c0a89b2656d4,40277,1731961702792' ***** 2024-11-18T20:28:23,999 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:28:23,999 INFO [M:0;c0a89b2656d4:40277 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:28:23,999 INFO [M:0;c0a89b2656d4:40277 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:28:23,999 DEBUG [M:0;c0a89b2656d4:40277 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:28:23,999 DEBUG [M:0;c0a89b2656d4:40277 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:28:23,999 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:28:23,999 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961703028 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961703028,5,FailOnTimeoutGroup] 2024-11-18T20:28:23,999 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961703027 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961703027,5,FailOnTimeoutGroup] 2024-11-18T20:28:23,999 INFO [M:0;c0a89b2656d4:40277 {}] hbase.ChoreService(370): Chore service for: master/c0a89b2656d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:28:23,999 INFO [M:0;c0a89b2656d4:40277 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:28:23,999 DEBUG [M:0;c0a89b2656d4:40277 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:28:23,999 INFO [M:0;c0a89b2656d4:40277 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:28:24,000 INFO [M:0;c0a89b2656d4:40277 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:28:24,000 INFO [M:0;c0a89b2656d4:40277 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:28:24,000 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:28:24,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:28:24,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:24,000 DEBUG [M:0;c0a89b2656d4:40277 {}] zookeeper.ZKUtil(347): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:28:24,000 WARN [M:0;c0a89b2656d4:40277 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:28:24,001 INFO [M:0;c0a89b2656d4:40277 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/.lastflushedseqids 2024-11-18T20:28:24,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741836_1012 (size=99) 2024-11-18T20:28:24,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741836_1012 (size=99) 2024-11-18T20:28:24,008 INFO [M:0;c0a89b2656d4:40277 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:28:24,008 INFO [M:0;c0a89b2656d4:40277 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:28:24,008 DEBUG [M:0;c0a89b2656d4:40277 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:28:24,008 INFO [M:0;c0a89b2656d4:40277 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:24,008 DEBUG [M:0;c0a89b2656d4:40277 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:24,009 DEBUG [M:0;c0a89b2656d4:40277 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:28:24,009 DEBUG [M:0;c0a89b2656d4:40277 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:24,009 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T20:28:24,026 DEBUG [M:0;c0a89b2656d4:40277 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b66dde6f3cc3422b81769f74bc08b37a is 82, key is hbase:meta,,1/info:regioninfo/1731961703691/Put/seqid=0 2024-11-18T20:28:24,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741837_1013 (size=5672) 2024-11-18T20:28:24,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741837_1013 (size=5672) 2024-11-18T20:28:24,032 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b66dde6f3cc3422b81769f74bc08b37a 2024-11-18T20:28:24,053 DEBUG [M:0;c0a89b2656d4:40277 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9c2a17e656a54e08ab5e7ae245c3a128 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731961703712/Put/seqid=0 2024-11-18T20:28:24,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741838_1014 (size=5275) 2024-11-18T20:28:24,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741838_1014 (size=5275) 2024-11-18T20:28:24,059 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9c2a17e656a54e08ab5e7ae245c3a128 2024-11-18T20:28:24,082 DEBUG [M:0;c0a89b2656d4:40277 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3dc3cb03bc7344cdb029e7e77efecbc4 is 69, key is c0a89b2656d4,34301,1731961702842/rs:state/1731961703086/Put/seqid=0 2024-11-18T20:28:24,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741839_1015 (size=5156) 2024-11-18T20:28:24,087 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741839_1015 (size=5156) 2024-11-18T20:28:24,088 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3dc3cb03bc7344cdb029e7e77efecbc4 2024-11-18T20:28:24,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:28:24,098 INFO [RS:0;c0a89b2656d4:34301 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:28:24,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34301-0x1005485a5dd0001, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:28:24,098 INFO [RS:0;c0a89b2656d4:34301 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0a89b2656d4,34301,1731961702842; zookeeper connection closed. 2024-11-18T20:28:24,098 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6c7cbd55 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6c7cbd55 2024-11-18T20:28:24,098 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:28:24,108 DEBUG [M:0;c0a89b2656d4:40277 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3b0cf003fcf4408f9c449701963c84ed is 52, key is load_balancer_on/state:d/1731961703774/Put/seqid=0 2024-11-18T20:28:24,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741840_1016 (size=5056) 2024-11-18T20:28:24,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741840_1016 (size=5056) 2024-11-18T20:28:24,115 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3b0cf003fcf4408f9c449701963c84ed 2024-11-18T20:28:24,122 DEBUG [M:0;c0a89b2656d4:40277 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b66dde6f3cc3422b81769f74bc08b37a as hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b66dde6f3cc3422b81769f74bc08b37a 2024-11-18T20:28:24,130 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b66dde6f3cc3422b81769f74bc08b37a, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T20:28:24,131 DEBUG [M:0;c0a89b2656d4:40277 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9c2a17e656a54e08ab5e7ae245c3a128 as hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9c2a17e656a54e08ab5e7ae245c3a128 2024-11-18T20:28:24,138 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9c2a17e656a54e08ab5e7ae245c3a128, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T20:28:24,139 DEBUG [M:0;c0a89b2656d4:40277 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3dc3cb03bc7344cdb029e7e77efecbc4 as hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3dc3cb03bc7344cdb029e7e77efecbc4 2024-11-18T20:28:24,146 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3dc3cb03bc7344cdb029e7e77efecbc4, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T20:28:24,147 DEBUG [M:0;c0a89b2656d4:40277 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3b0cf003fcf4408f9c449701963c84ed as hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3b0cf003fcf4408f9c449701963c84ed 2024-11-18T20:28:24,153 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33599/user/jenkins/test-data/5f42933d-b834-a8fd-46bd-2abbc47e3be7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3b0cf003fcf4408f9c449701963c84ed, entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T20:28:24,155 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false 2024-11-18T20:28:24,156 INFO [M:0;c0a89b2656d4:40277 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:24,157 DEBUG [M:0;c0a89b2656d4:40277 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961704008Disabling compacts and flushes for region at 1731961704008Disabling writes for close at 1731961704009 (+1 ms)Obtaining lock to block concurrent updates at 1731961704009Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961704009Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731961704009Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731961704010 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961704010Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961704025 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961704025Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961704038 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961704052 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961704052Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961704066 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961704081 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961704081Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961704094 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961704107 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961704108 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f3206fa: reopening flushed file at 1731961704121 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5555c448: reopening flushed file at 1731961704130 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1bf9f8a9: reopening flushed file at 1731961704138 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63bedd53: reopening flushed file at 1731961704146 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false at 1731961704155 (+9 ms)Writing region close event to WAL at 1731961704156 (+1 ms)Closed at 1731961704156 2024-11-18T20:28:24,157 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:24,157 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:24,157 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:24,157 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:24,157 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:24,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38061 is added to blk_1073741830_1006 (size=10311) 2024-11-18T20:28:24,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44809 is added to blk_1073741830_1006 (size=10311) 2024-11-18T20:28:24,162 INFO [M:0;c0a89b2656d4:40277 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:28:24,162 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:28:24,162 INFO [M:0;c0a89b2656d4:40277 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40277 2024-11-18T20:28:24,162 INFO [M:0;c0a89b2656d4:40277 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:28:24,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:28:24,264 INFO [M:0;c0a89b2656d4:40277 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:28:24,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40277-0x1005485a5dd0000, quorum=127.0.0.1:53039, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:28:24,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68317950{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:24,267 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@587d64cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:24,267 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:24,267 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66361119{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:24,267 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69c2cb49{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:24,268 WARN [BP-2016463703-172.17.0.2-1731961702178 heartbeating to localhost/127.0.0.1:33599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:28:24,268 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:28:24,269 WARN [BP-2016463703-172.17.0.2-1731961702178 heartbeating to localhost/127.0.0.1:33599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2016463703-172.17.0.2-1731961702178 (Datanode Uuid 8afcdf2f-54f3-427d-8328-c315aec4ef3d) service to localhost/127.0.0.1:33599 2024-11-18T20:28:24,269 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:28:24,269 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/data/data3/current/BP-2016463703-172.17.0.2-1731961702178 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:24,269 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/data/data4/current/BP-2016463703-172.17.0.2-1731961702178 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:24,270 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:28:24,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@383a5779{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:24,274 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58220e1e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:24,274 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:24,274 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@304195bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:24,274 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24bc4d6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:24,275 WARN [BP-2016463703-172.17.0.2-1731961702178 heartbeating to localhost/127.0.0.1:33599 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:28:24,275 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
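After tearing everything down, the entries that follow show the test bringing up a fresh minicluster with numRegionServers=1 and numDataNodes=2, since a datanode-death scenario needs more than one datanode. A hedged sketch of such a restart follows; the builder-style option class and the startMiniCluster(option) overload are assumptions based on the StartMiniClusterOption string printed in the startup entries below (newer code lines may name the class StartTestingClusterOption instead):

  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.apache.hadoop.hbase.StartMiniClusterOption;

  final class RestartSketch {
    static void restart(HBaseTestingUtil testUtil) throws Exception {
      // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, ...}
      // as recorded in the startup entries below; class and method names are assumed.
      StartMiniClusterOption option = StartMiniClusterOption.builder()
          .numMasters(1)
          .numRegionServers(1)
          .numDataNodes(2)
          .build();
      testUtil.startMiniCluster(option);
    }
  }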
2024-11-18T20:28:24,275 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:28:24,275 WARN [BP-2016463703-172.17.0.2-1731961702178 heartbeating to localhost/127.0.0.1:33599 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2016463703-172.17.0.2-1731961702178 (Datanode Uuid cef3fb04-c061-40ba-9322-4f2d86d1e64e) service to localhost/127.0.0.1:33599 2024-11-18T20:28:24,276 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/data/data1/current/BP-2016463703-172.17.0.2-1731961702178 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:24,276 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/cluster_26f87c84-0164-23ac-6729-845adc338194/data/data2/current/BP-2016463703-172.17.0.2-1731961702178 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:24,276 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:28:24,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1e033196{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:28:24,284 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3bde2993{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:24,284 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:24,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15baacc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:24,285 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ceaf06d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:24,292 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:28:24,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:28:24,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:28:24,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.log.dir so I do NOT create it in target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404 2024-11-18T20:28:24,309 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/980303e6-c16d-5174-6f88-eeb2261a9e54/hadoop.tmp.dir so I do NOT create it in target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404 2024-11-18T20:28:24,309 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916, deleteOnExit=true 2024-11-18T20:28:24,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:28:24,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/test.cache.data in system properties and HBase conf 2024-11-18T20:28:24,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:28:24,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:28:24,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:28:24,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:28:24,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:28:24,310 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:28:24,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:28:24,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:28:24,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:28:24,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:28:24,326 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:28:24,379 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:24,385 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:24,389 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:24,390 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:24,390 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:28:24,390 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:24,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e9f62e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:24,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34db4bed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:24,487 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d91fc86{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/java.io.tmpdir/jetty-localhost-40885-hadoop-hdfs-3_4_1-tests_jar-_-any-8637820336262522210/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:28:24,488 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@278b2194{HTTP/1.1, (http/1.1)}{localhost:40885} 2024-11-18T20:28:24,488 INFO [Time-limited test {}] server.Server(415): Started @103183ms 2024-11-18T20:28:24,500 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:28:24,552 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:24,556 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:24,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:24,557 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:24,557 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:28:24,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@250f574a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:24,558 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67450732{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:24,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@242079af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/java.io.tmpdir/jetty-localhost-35581-hadoop-hdfs-3_4_1-tests_jar-_-any-7259956335101598683/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:24,652 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@adfdc3f{HTTP/1.1, (http/1.1)}{localhost:35581} 2024-11-18T20:28:24,652 INFO [Time-limited test {}] server.Server(415): Started @103347ms 2024-11-18T20:28:24,653 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:28:24,685 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:24,690 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:24,691 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:24,691 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:24,691 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:28:24,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24afe84a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:24,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f1acac6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:24,715 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data1/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:24,715 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data2/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:24,736 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:28:24,739 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2f6e5ff4babc54 with lease ID 0x1b33d7deb1657942: Processing first storage report for DS-f975aecd-40cf-4983-a42a-d1a0da49802a from datanode DatanodeRegistration(127.0.0.1:41037, datanodeUuid=8208717c-d571-47cd-91a4-4f27e9bd4380, infoPort=41523, infoSecurePort=0, ipcPort=39355, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:24,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2f6e5ff4babc54 with lease ID 0x1b33d7deb1657942: from storage DS-f975aecd-40cf-4983-a42a-d1a0da49802a node DatanodeRegistration(127.0.0.1:41037, datanodeUuid=8208717c-d571-47cd-91a4-4f27e9bd4380, infoPort=41523, infoSecurePort=0, ipcPort=39355, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:24,739 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2f6e5ff4babc54 with lease ID 0x1b33d7deb1657942: Processing first storage report for DS-a4611635-5260-4646-b2b7-a03d6ee56aef from datanode DatanodeRegistration(127.0.0.1:41037, datanodeUuid=8208717c-d571-47cd-91a4-4f27e9bd4380, infoPort=41523, infoSecurePort=0, ipcPort=39355, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:24,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2f6e5ff4babc54 with lease ID 0x1b33d7deb1657942: from storage DS-a4611635-5260-4646-b2b7-a03d6ee56aef node DatanodeRegistration(127.0.0.1:41037, datanodeUuid=8208717c-d571-47cd-91a4-4f27e9bd4380, infoPort=41523, infoSecurePort=0, ipcPort=39355, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:24,788 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2cc343c1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/java.io.tmpdir/jetty-localhost-43719-hadoop-hdfs-3_4_1-tests_jar-_-any-10620965681122029257/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:24,789 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40493c32{HTTP/1.1, (http/1.1)}{localhost:43719} 2024-11-18T20:28:24,789 INFO [Time-limited test {}] server.Server(415): Started @103484ms 2024-11-18T20:28:24,791 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
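The output above traces a full HBaseTestingUtil restart: the previous minicluster is torn down, a new one is brought up with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, and the embedded DFS namenode, two datanodes and their Jetty web contexts come back online. For orientation only, the sketch below shows how a test typically drives that sequence; it assumes the HBaseTestingUtil / StartMiniClusterOption builder API named in the log and is not the source of this particular test.

    // Minimal sketch, assuming the HBaseTestingUtil / StartMiniClusterOption API that the
    // log lines above name; everything outside those two types is illustrative.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // numMasters=1 in the logged option
            .numRegionServers(1)  // numRegionServers=1
            .numDataNodes(2)      // numDataNodes=2 (the data1/data2 dirs above)
            .numZkServers(1)      // numZkServers=1
            .build();
        util.startMiniCluster(option);  // triggers the DFS, ZK and HBase startup logged here
        try {
          // test logic against the running minicluster would go here
        } finally {
          util.shutdownMiniCluster();   // produces the "Minicluster is down" line seen earlier
        }
      }
    }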
2024-11-18T20:28:24,856 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data4/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:24,856 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data3/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:24,877 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:28:24,880 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f43f360650850b2 with lease ID 0x1b33d7deb1657943: Processing first storage report for DS-a6a248e9-9f4d-411f-a6b1-573586317893 from datanode DatanodeRegistration(127.0.0.1:32861, datanodeUuid=0d4885e3-a96e-4f09-acbd-9e9639f9484d, infoPort=34265, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:24,880 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f43f360650850b2 with lease ID 0x1b33d7deb1657943: from storage DS-a6a248e9-9f4d-411f-a6b1-573586317893 node DatanodeRegistration(127.0.0.1:32861, datanodeUuid=0d4885e3-a96e-4f09-acbd-9e9639f9484d, infoPort=34265, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T20:28:24,880 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f43f360650850b2 with lease ID 0x1b33d7deb1657943: Processing first storage report for DS-f4758b62-1e55-46da-9dae-2d88667e0522 from datanode DatanodeRegistration(127.0.0.1:32861, datanodeUuid=0d4885e3-a96e-4f09-acbd-9e9639f9484d, infoPort=34265, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:24,880 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f43f360650850b2 with lease ID 0x1b33d7deb1657943: from storage DS-f4758b62-1e55-46da-9dae-2d88667e0522 node DatanodeRegistration(127.0.0.1:32861, datanodeUuid=0d4885e3-a96e-4f09-acbd-9e9639f9484d, infoPort=34265, infoSecurePort=0, ipcPort=38845, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:24,924 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404 2024-11-18T20:28:24,927 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/zookeeper_0, clientPort=49736, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:28:24,928 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49736 2024-11-18T20:28:24,928 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:24,930 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:24,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32861 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:28:24,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41037 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:28:24,943 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8 with version=8 2024-11-18T20:28:24,943 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/hbase-staging 2024-11-18T20:28:24,946 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:28:24,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:24,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:24,946 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:28:24,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:24,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:28:24,946 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:28:24,946 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:28:24,947 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42449 2024-11-18T20:28:24,950 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42449 connecting to ZooKeeper ensemble=127.0.0.1:49736 2024-11-18T20:28:24,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:424490x0, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:28:24,953 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42449-0x1005485ae4d0000 connected 2024-11-18T20:28:24,967 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:24,968 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:24,971 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:28:24,971 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8, hbase.cluster.distributed=false 2024-11-18T20:28:24,973 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:28:24,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42449 2024-11-18T20:28:24,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42449 2024-11-18T20:28:24,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42449 2024-11-18T20:28:24,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42449 2024-11-18T20:28:24,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42449 2024-11-18T20:28:24,989 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:28:24,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:24,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:24,989 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:28:24,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:24,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:28:24,989 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:28:24,989 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:28:24,990 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40455 2024-11-18T20:28:24,991 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40455 connecting to ZooKeeper ensemble=127.0.0.1:49736 2024-11-18T20:28:24,992 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:24,993 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:24,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404550x0, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:28:24,997 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40455-0x1005485ae4d0001 connected 2024-11-18T20:28:24,997 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:28:24,997 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:28:24,999 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:28:24,999 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:28:25,000 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:28:25,000 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40455 2024-11-18T20:28:25,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40455 2024-11-18T20:28:25,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40455 2024-11-18T20:28:25,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40455 2024-11-18T20:28:25,002 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40455 
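Both the master (port 42449) and the regionserver (port 40455) above register ZKWatcher instances against the ensemble 127.0.0.1:49736 with baseZNode=/hbase and set watchers on znodes such as /hbase/running, /hbase/master and /hbase/acl. Those paths can be inspected with the plain ZooKeeper client; the sketch below is illustrative only (the class name and the direct use of org.apache.zookeeper.ZooKeeper are assumptions, not how HBase's RecoverableZooKeeper is wired).

    // Illustrative sketch: list the znodes under the baseZNode shown in the log, using the
    // MiniZooKeeperCluster client port (127.0.0.1:49736) also shown in the log.
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkInspectSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49736", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // Expected children per the watcher events above: master, running, backup-masters, acl, ...
        for (String child : zk.getChildren("/hbase", false)) {
          System.out.println("/hbase/" + child);
        }
        zk.close();
      }
    }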
2024-11-18T20:28:25,014 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c0a89b2656d4:42449 2024-11-18T20:28:25,014 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c0a89b2656d4,42449,1731961704945 2024-11-18T20:28:25,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:28:25,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:28:25,016 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c0a89b2656d4,42449,1731961704945 2024-11-18T20:28:25,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:28:25,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,017 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:28:25,018 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c0a89b2656d4,42449,1731961704945 from backup master directory 2024-11-18T20:28:25,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c0a89b2656d4,42449,1731961704945 2024-11-18T20:28:25,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:28:25,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:28:25,019 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
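The sequence above is the active-master handshake: the master first adds an ephemeral znode under /hbase/backup-masters, the /hbase/master NodeCreated event fans out to the watchers, and ActiveMasterManager deletes the backup-masters entry once the master has won. The sketch below shows the generic ephemeral-znode registration pattern behind that dance; it uses the raw ZooKeeper API and is an assumption for illustration, not HBase's ActiveMasterManager code.

    // Generic leader-registration pattern; paths follow the log above, the helper itself
    // is illustrative only.
    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class ActiveMasterSketch {
      static void register(ZooKeeper zk, String serverName) throws Exception {
        byte[] data = serverName.getBytes(StandardCharsets.UTF_8);
        // Ephemeral znode: it vanishes automatically if this master's session expires.
        zk.create("/hbase/backup-masters/" + serverName, data,
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // Try to become active; if another master already holds /hbase/master this throws
        // KeeperException.NodeExistsException and the caller stays a backup.
        zk.create("/hbase/master", data,
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // The winner removes its backup-masters entry, matching the
        // "Deleting ZNode ... from backup master directory" line above.
        zk.delete("/hbase/backup-masters/" + serverName, -1);
      }
    }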
2024-11-18T20:28:25,019 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c0a89b2656d4,42449,1731961704945 2024-11-18T20:28:25,023 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/hbase.id] with ID: 3fe98fc1-fbba-4530-971f-2473f0cf2d7f 2024-11-18T20:28:25,023 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/.tmp/hbase.id 2024-11-18T20:28:25,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41037 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:28:25,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32861 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:28:25,030 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/.tmp/hbase.id]:[hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/hbase.id] 2024-11-18T20:28:25,043 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:25,043 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:28:25,045 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
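The two FSUtils lines above (625 and 634) record the usual write-to-temp-then-rename idiom for the cluster ID: hbase.id is first written under .tmp and only then moved into place, so readers never observe a partially written file. A minimal sketch of that idiom with the stock Hadoop FileSystem API follows; the helper name and paths are illustrative, not the actual FSUtils implementation.

    // Write-to-temp-then-rename sketch using the Hadoop FileSystem API.
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
        Path idFile = new Path(rootDir, "hbase.id");
        Path tmpFile = new Path(new Path(rootDir, ".tmp"), "hbase.id");
        // Write the content to a temporary file first...
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // ...then move it atomically into its target location.
        if (!fs.rename(tmpFile, idFile)) {
          throw new IOException("rename failed: " + tmpFile + " -> " + idFile);
        }
      }
    }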
2024-11-18T20:28:25,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32861 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:28:25,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41037 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:28:25,057 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:28:25,057 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:28:25,058 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:28:25,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41037 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:28:25,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32861 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:28:25,066 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store 2024-11-18T20:28:25,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41037 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:28:25,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32861 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:28:25,074 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:25,074 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:28:25,074 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:25,074 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:25,074 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:28:25,074 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:28:25,074 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
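The MasterRegion lines above spell out the schema of the master local region 'master:store': an 'info' family with three versions, ROW_INDEX_V1 encoding, a ROWCOL bloom filter, in-memory caching and 8 KB blocks, plus 'proc', 'rs' and 'state' families at the defaults (one version, ROW bloom, 64 KB blocks). Expressed with the public descriptor builders the same schema looks roughly like the sketch below; this is only to help read the logged attributes, since MasterRegion creates this region internally rather than through Admin.

    // Descriptor-building sketch for the attributes logged above; illustrative only.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs', 'state' use the defaults shown in the log (1 version, ROW bloom, 64 KB)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }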
2024-11-18T20:28:25,075 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961705074Disabling compacts and flushes for region at 1731961705074Disabling writes for close at 1731961705074Writing region close event to WAL at 1731961705074Closed at 1731961705074 2024-11-18T20:28:25,075 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/.initializing 2024-11-18T20:28:25,075 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945 2024-11-18T20:28:25,079 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C42449%2C1731961704945, suffix=, logDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945, archiveDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/oldWALs, maxLogs=10 2024-11-18T20:28:25,080 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C42449%2C1731961704945.1731961705079 2024-11-18T20:28:25,087 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961705079 2024-11-18T20:28:25,089 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34265:34265),(127.0.0.1/127.0.0.1:41523:41523)] 2024-11-18T20:28:25,091 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:28:25,091 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:25,091 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,091 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:28:25,096 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:25,097 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,098 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:28:25,099 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,099 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:28:25,099 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,100 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:28:25,101 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:28:25,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:28:25,103 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:28:25,103 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,104 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,104 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,106 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,106 INFO [regionserver/c0a89b2656d4:0.leaseChecker 
{}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:28:25,106 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,106 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:28:25,108 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:28:25,110 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:28:25,111 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=761966, jitterRate=-0.03111104667186737}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:28:25,113 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961705091Initializing all the Stores at 1731961705093 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961705093Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961705093Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961705093Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961705093Cleaning up temporary data from old regions at 1731961705106 (+13 ms)Region opened successfully at 1731961705112 (+6 ms) 2024-11-18T20:28:25,113 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:28:25,118 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39816fbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:28:25,119 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:28:25,119 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:28:25,119 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:28:25,119 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:28:25,120 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:28:25,121 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:28:25,121 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:28:25,123 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:28:25,124 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:28:25,125 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:28:25,125 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:28:25,126 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:28:25,126 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:28:25,127 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:28:25,128 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:28:25,128 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:28:25,130 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:28:25,130 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:28:25,133 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:28:25,134 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:28:25,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:28:25,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:28:25,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,135 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c0a89b2656d4,42449,1731961704945, sessionid=0x1005485ae4d0000, setting cluster-up flag (Was=false) 2024-11-18T20:28:25,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,140 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:28:25,141 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,42449,1731961704945 2024-11-18T20:28:25,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,147 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, 
/hbase/online-snapshot/abort 2024-11-18T20:28:25,148 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,42449,1731961704945 2024-11-18T20:28:25,149 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:28:25,151 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:28:25,151 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:28:25,151 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:28:25,152 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c0a89b2656d4,42449,1731961704945 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:28:25,153 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:28:25,153 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:28:25,153 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:28:25,154 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:28:25,154 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c0a89b2656d4:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:28:25,154 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,154 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:28:25,154 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,157 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961735157 2024-11-18T20:28:25,157 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:28:25,157 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:28:25,157 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:28:25,157 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:28:25,157 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:28:25,157 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:28:25,157 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:28:25,157 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:28:25,158 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,158 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'8192 B (8KB)'} 2024-11-18T20:28:25,159 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,160 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:28:25,160 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:28:25,161 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:28:25,165 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:28:25,165 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:28:25,165 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961705165,5,FailOnTimeoutGroup] 2024-11-18T20:28:25,166 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961705165,5,FailOnTimeoutGroup] 2024-11-18T20:28:25,166 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,166 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:28:25,166 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,166 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
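The entries above show the master scheduling its cleaner chores at fixed periods (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms). As a minimal sketch of what that period-based scheduling amounts to, the snippet below uses a plain java.util.concurrent scheduler; the class and task names are hypothetical stand-ins, not HBase's ChoreService API.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Simplified stand-in for the chore scheduling logged above: each "chore" is a
// named Runnable run at a fixed period.
public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

    // Periods taken from the log entries above (milliseconds).
    schedule(pool, "LogsCleaner", 600_000);
    schedule(pool, "HFileCleaner", 600_000);
    schedule(pool, "ReplicationBarrierCleaner", 43_200_000);
    schedule(pool, "SnapshotCleaner", 1_800_000);

    Thread.sleep(1_000); // let the initial runs fire
    pool.shutdown();
  }

  static void schedule(ScheduledExecutorService pool, String name, long periodMs) {
    // scheduleAtFixedRate: first run immediately, then every periodMs.
    pool.scheduleAtFixedRate(
        () -> System.out.println("chore " + name + " running"),
        0, periodMs, TimeUnit.MILLISECONDS);
  }
}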
2024-11-18T20:28:25,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41037 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:28:25,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32861 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:28:25,171 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:28:25,171 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8 2024-11-18T20:28:25,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41037 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:28:25,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32861 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:28:25,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:25,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:28:25,184 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:28:25,184 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:25,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:28:25,187 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:28:25,187 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:25,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:28:25,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:28:25,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:25,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:28:25,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:28:25,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:25,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:28:25,194 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740 2024-11-18T20:28:25,194 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740 2024-11-18T20:28:25,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:28:25,196 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:28:25,196 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
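Both FlushLargeStoresPolicy messages in this section (32.0 M for the master local region, 16.0 M for hbase:meta, each with four column families) describe the same fallback: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the lower bound becomes the region's memstore flush size divided by the number of families. A sketch of that arithmetic follows; the 128 MB value matches the flushSize=134217728 logged earlier for the master region, while the 64 MB meta flush size is an assumption chosen to reproduce the logged 16.0 M.

// Fallback described by the FlushLargeStoresPolicy lines above:
// lower bound = memstore flush size / number of column families.
public class FlushLowerBoundSketch {
  static long fallbackLowerBound(long memstoreFlushSizeBytes, int numFamilies) {
    return memstoreFlushSizeBytes / numFamilies;
  }

  public static void main(String[] args) {
    long mb = 1024L * 1024L;

    // master:store region: 128 MB flush size, 4 families (info, proc, rs, state)
    System.out.println(fallbackLowerBound(128 * mb, 4) / mb + " MB"); // 32 MB

    // hbase:meta region: 4 families (info, ns, rep_barrier, table); assumed 64 MB flush size
    System.out.println(fallbackLowerBound(64 * mb, 4) / mb + " MB");  // 16 MB
  }
}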
2024-11-18T20:28:25,197 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:28:25,200 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:28:25,200 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=697547, jitterRate=-0.11302398145198822}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:28:25,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961705181Initializing all the Stores at 1731961705182 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961705182Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961705182Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961705182Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961705182Cleaning up temporary data from old regions at 1731961705196 (+14 ms)Region opened successfully at 1731961705201 (+5 ms) 2024-11-18T20:28:25,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:28:25,201 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:28:25,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:28:25,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:28:25,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:28:25,202 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:28:25,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961705201Disabling compacts and flushes for region at 1731961705201Disabling writes for close at 1731961705201Writing region 
close event to WAL at 1731961705202 (+1 ms)Closed at 1731961705202 2024-11-18T20:28:25,204 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:28:25,204 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:28:25,204 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:28:25,204 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(746): ClusterId : 3fe98fc1-fbba-4530-971f-2473f0cf2d7f 2024-11-18T20:28:25,204 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:28:25,206 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:28:25,206 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:28:25,206 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:28:25,207 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:28:25,208 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:28:25,209 DEBUG [RS:0;c0a89b2656d4:40455 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45fb486e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:28:25,222 DEBUG [RS:0;c0a89b2656d4:40455 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c0a89b2656d4:40455 2024-11-18T20:28:25,223 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:28:25,223 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:28:25,223 DEBUG [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(832): About to register with Master. 
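The region-open entries earlier in this span log a ConstantSizeRegionSplitPolicy with a desiredMaxFileSize and a jitterRate (761966 at -0.0311 for the master local region, 697547 at -0.1130 for hbase:meta). The sketch below shows how such a jittered threshold can be derived, assuming desired = base * (1 + jitterRate); the 786432-byte base is inferred from the two logged pairs rather than read from the test configuration, so treat it as an illustration, not HBase's exact split-policy code.

// Jittered split threshold, assuming desired = base + base * jitterRate.
public class SplitJitterSketch {
  static long jitteredMaxFileSize(long baseBytes, double jitterRate) {
    return baseBytes + (long) (baseBytes * jitterRate);
  }

  public static void main(String[] args) {
    long base = 786_432L; // inferred base max file size for this test run

    // Reproduces the two values logged above.
    System.out.println(jitteredMaxFileSize(base, -0.03111104667186737)); // 761966
    System.out.println(jitteredMaxFileSize(base, -0.11302398145198822)); // 697547
  }
}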
2024-11-18T20:28:25,224 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0a89b2656d4,42449,1731961704945 with port=40455, startcode=1731961704988 2024-11-18T20:28:25,224 DEBUG [RS:0;c0a89b2656d4:40455 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:28:25,226 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50777, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:28:25,226 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42449 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:25,226 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42449 {}] master.ServerManager(517): Registering regionserver=c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:25,228 DEBUG [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8 2024-11-18T20:28:25,228 DEBUG [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39387 2024-11-18T20:28:25,228 DEBUG [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:28:25,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:28:25,230 DEBUG [RS:0;c0a89b2656d4:40455 {}] zookeeper.ZKUtil(111): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:25,230 WARN [RS:0;c0a89b2656d4:40455 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:28:25,230 INFO [RS:0;c0a89b2656d4:40455 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:28:25,230 DEBUG [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:25,230 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0a89b2656d4,40455,1731961704988] 2024-11-18T20:28:25,234 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:28:25,236 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:28:25,237 INFO [RS:0;c0a89b2656d4:40455 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:28:25,237 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
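The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low mark of 836 M. A small sketch of that arithmetic, assuming the usual 0.4 fraction of heap for the global limit and a 0.95 low-mark fraction, and a roughly 2.2 GB heap inferred from the logged numbers; all three inputs are assumptions, not values read from this test's configuration.

// Global memstore limit arithmetic consistent with the logged 880 M / 836 M.
public class MemStoreLimitSketch {
  public static void main(String[] args) {
    long heapMb = 2200;             // assumed heap for this test JVM
    double globalFraction = 0.4;    // assumed fraction of heap reserved for memstores
    double lowMarkFraction = 0.95;  // assumed low-mark fraction of the limit

    long limitMb = (long) (heapMb * globalFraction);
    long lowMarkMb = (long) (limitMb * lowMarkFraction);

    System.out.println("globalMemStoreLimit=" + limitMb + " M");          // 880 M
    System.out.println("globalMemStoreLimitLowMark=" + lowMarkMb + " M"); // 836 M
  }
}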
2024-11-18T20:28:25,237 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:28:25,238 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:28:25,239 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,239 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,240 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:28:25,240 DEBUG [RS:0;c0a89b2656d4:40455 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:28:25,240 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
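Each "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." line above gives one region-server event type its own bounded thread pool. The sketch below mirrors a few of those pool sizes with a plain ThreadPoolExecutor; it is a simplified picture, not HBase's own ExecutorService wrapper.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// One bounded pool per named event type, with sizes taken from the log above.
public class RegionServerExecutorSketch {
  static ThreadPoolExecutor namedPool(String name, int core, int max) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        core, max, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    System.out.println("Starting executor service name=" + name
        + ", corePoolSize=" + core + ", maxPoolSize=" + max);
    return pool;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = namedPool("RS_OPEN_REGION", 1, 1);
    ThreadPoolExecutor logReplay  = namedPool("RS_LOG_REPLAY_OPS", 2, 2);
    ThreadPoolExecutor snapshots  = namedPool("RS_SNAPSHOT_OPERATIONS", 3, 3);

    openRegion.shutdown();
    logReplay.shutdown();
    snapshots.shutdown();
  }
}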
2024-11-18T20:28:25,240 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,240 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,240 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,240 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,240 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40455,1731961704988-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:28:25,255 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:28:25,255 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,40455,1731961704988-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,255 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,255 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.Replication(171): c0a89b2656d4,40455,1731961704988 started 2024-11-18T20:28:25,269 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,269 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(1482): Serving as c0a89b2656d4,40455,1731961704988, RpcServer on c0a89b2656d4/172.17.0.2:40455, sessionid=0x1005485ae4d0001 2024-11-18T20:28:25,269 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:28:25,269 DEBUG [RS:0;c0a89b2656d4:40455 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:25,269 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,40455,1731961704988' 2024-11-18T20:28:25,269 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:28:25,270 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:28:25,271 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:28:25,271 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:28:25,271 DEBUG [RS:0;c0a89b2656d4:40455 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:25,271 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,40455,1731961704988' 2024-11-18T20:28:25,271 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:28:25,271 DEBUG 
[RS:0;c0a89b2656d4:40455 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:28:25,272 DEBUG [RS:0;c0a89b2656d4:40455 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:28:25,272 INFO [RS:0;c0a89b2656d4:40455 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:28:25,272 INFO [RS:0;c0a89b2656d4:40455 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:28:25,358 WARN [c0a89b2656d4:42449 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:28:25,374 INFO [RS:0;c0a89b2656d4:40455 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C40455%2C1731961704988, suffix=, logDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988, archiveDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs, maxLogs=32 2024-11-18T20:28:25,375 INFO [RS:0;c0a89b2656d4:40455 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40455%2C1731961704988.1731961705375 2024-11-18T20:28:25,381 INFO [RS:0;c0a89b2656d4:40455 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 2024-11-18T20:28:25,385 DEBUG [RS:0;c0a89b2656d4:40455 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34265:34265),(127.0.0.1/127.0.0.1:41523:41523)] 2024-11-18T20:28:25,608 DEBUG [c0a89b2656d4:42449 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:28:25,610 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:25,616 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,40455,1731961704988, state=OPENING 2024-11-18T20:28:25,619 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:28:25,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:28:25,623 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:28:25,623 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:28:25,623 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:28:25,623 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,40455,1731961704988}] 2024-11-18T20:28:25,778 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:28:25,781 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35217, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:28:25,788 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:28:25,788 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:28:25,792 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C40455%2C1731961704988.meta, suffix=.meta, logDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988, archiveDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs, maxLogs=32 2024-11-18T20:28:25,793 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta 2024-11-18T20:28:25,803 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta 2024-11-18T20:28:25,804 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34265:34265),(127.0.0.1/127.0.0.1:41523:41523)] 2024-11-18T20:28:25,805 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:28:25,805 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:28:25,806 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:28:25,806 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
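The two "WAL configuration" entries above show a roll size of half the block size (256 MB -> 128 MB) and WAL files named <prefix>.<timestamp><suffix>, with an empty suffix for the regular WAL and ".meta" for the meta WAL. The sketch below reproduces both, assuming a 0.5 roll multiplier (consistent with the logged numbers) and a naming pattern inferred from the logged file names; neither is taken from HBase source here.

// Roll size and WAL file naming consistent with the two entries above.
public class WalConfigSketch {
  static long rollSizeBytes(long blockSizeBytes, double rollMultiplier) {
    return (long) (blockSizeBytes * rollMultiplier);
  }

  static String walFileName(String prefix, long timestamp, String suffix) {
    return prefix + "." + timestamp + suffix;
  }

  public static void main(String[] args) {
    long blockSize = 256L * 1024 * 1024;
    System.out.println(rollSizeBytes(blockSize, 0.5) / (1024 * 1024) + " MB"); // 128 MB

    String prefix = "c0a89b2656d4%2C40455%2C1731961704988";
    System.out.println(walFileName(prefix, 1731961705375L, ""));                 // regular WAL
    System.out.println(walFileName(prefix + ".meta", 1731961705793L, ".meta"));  // meta WAL
  }
}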
2024-11-18T20:28:25,806 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:28:25,806 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:25,806 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:28:25,806 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:28:25,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:28:25,830 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:28:25,830 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,831 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:25,831 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:28:25,832 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:28:25,832 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,832 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:25,832 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:28:25,833 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:28:25,833 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:28:25,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:28:25,834 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:28:25,834 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:25,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
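The CompactionConfiguration lines above carry a selection ratio of 1.2 and minFilesToCompact of 3. The sketch below shows the general idea behind that ratio test when picking store files for a minor compaction: every file in a candidate set should be no larger than ratio times the combined size of the other files. It is a simplified illustration of the idea, not HBase's ExploringCompactionPolicy itself.

import java.util.List;

// Simplified ratio check: reject a selection if any single file dwarfs the rest.
public class CompactionRatioSketch {
  static boolean filesInRatio(List<Long> sizes, double ratio, int minFiles) {
    if (sizes.size() < minFiles) {
      return false; // not enough files to bother compacting
    }
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > ratio * (total - size)) {
        return false; // one file is too large relative to the others
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(filesInRatio(List.of(10L, 12L, 11L), 1.2, 3));  // true
    System.out.println(filesInRatio(List.of(100L, 12L, 11L), 1.2, 3)); // false: 100 > 1.2 * 23
  }
}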
2024-11-18T20:28:25,835 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:28:25,835 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740 2024-11-18T20:28:25,836 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740 2024-11-18T20:28:25,838 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:28:25,838 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:28:25,838 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:28:25,839 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:28:25,840 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860531, jitterRate=0.09422282874584198}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:28:25,840 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:28:25,841 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961705806Writing region info on filesystem at 1731961705806Initializing all the Stores at 1731961705807 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961705807Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961705829 (+22 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961705829Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961705829Cleaning up temporary data from old regions at 1731961705838 (+9 ms)Running coprocessor post-open hooks at 1731961705840 (+2 ms)Region opened successfully at 1731961705841 (+1 ms) 2024-11-18T20:28:25,842 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961705778 2024-11-18T20:28:25,844 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:28:25,845 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:28:25,845 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:25,846 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,40455,1731961704988, state=OPEN 2024-11-18T20:28:25,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:28:25,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:28:25,849 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:25,849 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:28:25,849 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:28:25,852 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:28:25,852 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,40455,1731961704988 in 226 msec 2024-11-18T20:28:25,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:28:25,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 648 msec 2024-11-18T20:28:25,856 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:28:25,856 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:28:25,857 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:28:25,857 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,40455,1731961704988, seqNum=-1] 2024-11-18T20:28:25,858 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:28:25,859 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52401, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:28:25,865 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 714 msec 2024-11-18T20:28:25,866 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961705865, completionTime=-1 2024-11-18T20:28:25,866 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:28:25,866 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:28:25,867 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:28:25,867 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961765867 2024-11-18T20:28:25,868 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961825868 2024-11-18T20:28:25,868 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-18T20:28:25,868 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42449,1731961704945-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,868 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42449,1731961704945-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,868 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42449,1731961704945-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,868 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c0a89b2656d4:42449, period=300000, unit=MILLISECONDS is enabled. 
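[Editor's note] The ConnectionUtils(547/555) entries above show the client registry resolving the hbase:meta location (region=hbase:meta,,1.1588230740 on c0a89b2656d4,40455). A small sketch of how a client would perform the same lookup through the public API; the class name is hypothetical and it assumes a reachable cluster with hbase-site.xml on the classpath:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes cluster settings are on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves the same kind of location the "fetched meta region location" line reports.
      HRegionLocation loc = locator.getRegionLocation(new byte[0]);
      System.out.println(loc.getServerName() + " hosts " + loc.getRegion().getRegionNameAsString());
    }
  }
}
```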
2024-11-18T20:28:25,868 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,868 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,870 DEBUG [master/c0a89b2656d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:28:25,872 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.853sec 2024-11-18T20:28:25,872 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:28:25,872 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:28:25,872 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:28:25,872 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:28:25,872 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:28:25,872 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42449,1731961704945-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:28:25,872 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42449,1731961704945-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:28:25,874 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:28:25,874 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:28:25,874 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42449,1731961704945-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:28:25,904 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@707b5b0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:28:25,905 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c0a89b2656d4,42449,-1 for getting cluster id 2024-11-18T20:28:25,905 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:28:25,906 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3fe98fc1-fbba-4530-971f-2473f0cf2d7f' 2024-11-18T20:28:25,906 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:28:25,907 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3fe98fc1-fbba-4530-971f-2473f0cf2d7f" 2024-11-18T20:28:25,907 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21c334b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:28:25,907 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c0a89b2656d4,42449,-1] 2024-11-18T20:28:25,907 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:28:25,907 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:28:25,909 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37002, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:28:25,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@474e0967, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:28:25,910 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:28:25,911 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,40455,1731961704988, seqNum=-1] 2024-11-18T20:28:25,911 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:28:25,913 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52970, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:28:25,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c0a89b2656d4,42449,1731961704945 2024-11-18T20:28:25,915 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:25,918 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:28:25,933 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:28:25,933 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:25,933 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:25,933 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:28:25,933 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:28:25,933 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:28:25,933 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:28:25,933 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:28:25,934 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42387 2024-11-18T20:28:25,936 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42387 connecting to ZooKeeper ensemble=127.0.0.1:49736 2024-11-18T20:28:25,937 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:25,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:28:25,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423870x0, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:28:25,944 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:423870x0, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-18T20:28:25,944 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-18T20:28:25,944 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42387-0x1005485ae4d0002 connected 2024-11-18T20:28:25,945 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:28:25,945 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:28:25,946 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42387-0x1005485ae4d0002, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:28:25,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42387-0x1005485ae4d0002, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:28:25,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42387 2024-11-18T20:28:25,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42387 2024-11-18T20:28:25,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42387 2024-11-18T20:28:25,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42387 2024-11-18T20:28:25,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42387 2024-11-18T20:28:25,954 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(746): ClusterId : 3fe98fc1-fbba-4530-971f-2473f0cf2d7f 2024-11-18T20:28:25,954 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:28:25,956 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:28:25,956 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:28:25,957 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:28:25,957 DEBUG [RS:1;c0a89b2656d4:42387 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7257e1ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:28:25,969 DEBUG [RS:1;c0a89b2656d4:42387 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c0a89b2656d4:42387 2024-11-18T20:28:25,969 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:28:25,969 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:28:25,969 DEBUG [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(832): About to register with Master. 
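[Editor's note] The RecoverableZooKeeper/ZKWatcher entries above show the new region server joining the ensemble at 127.0.0.1:49736 under baseZNode=/hbase. For reference, a sketch of the client-side settings those values correspond to; the class name is hypothetical and the values just mirror the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class ZkQuorumSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");     // ensemble host from the log
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 49736);   // ensemble port from the log
    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/hbase");  // baseZNode from the log
    System.out.println(conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
        + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
  }
}
```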
2024-11-18T20:28:25,970 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0a89b2656d4,42449,1731961704945 with port=42387, startcode=1731961705932 2024-11-18T20:28:25,970 DEBUG [RS:1;c0a89b2656d4:42387 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:28:25,972 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41551, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:28:25,972 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42449 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0a89b2656d4,42387,1731961705932 2024-11-18T20:28:25,972 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42449 {}] master.ServerManager(517): Registering regionserver=c0a89b2656d4,42387,1731961705932 2024-11-18T20:28:25,974 DEBUG [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8 2024-11-18T20:28:25,974 DEBUG [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39387 2024-11-18T20:28:25,974 DEBUG [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:28:25,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:28:25,976 DEBUG [RS:1;c0a89b2656d4:42387 {}] zookeeper.ZKUtil(111): regionserver:42387-0x1005485ae4d0002, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0a89b2656d4,42387,1731961705932 2024-11-18T20:28:25,976 WARN [RS:1;c0a89b2656d4:42387 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:28:25,976 INFO [RS:1;c0a89b2656d4:42387 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:28:25,976 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0a89b2656d4,42387,1731961705932] 2024-11-18T20:28:25,976 DEBUG [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932 2024-11-18T20:28:25,979 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:28:25,981 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:28:25,981 INFO [RS:1;c0a89b2656d4:42387 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:28:25,981 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
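[Editor's note] The WALFactory(196) entry above instantiates FSHLogProvider for the new region server. A sketch of the configuration switch that selects that provider; the class name is hypothetical, and only the "hbase.wal.provider" key and its standard values are asserted here:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects FSHLogProvider, as seen in the WALFactory line above;
    // "asyncfs" would select the async WAL provider instead.
    conf.set("hbase.wal.provider", "filesystem");
    System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
  }
}
```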
2024-11-18T20:28:25,981 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:28:25,982 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:28:25,982 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,982 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:28:25,983 DEBUG [RS:1;c0a89b2656d4:42387 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:28:25,984 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T20:28:25,984 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,984 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,984 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,984 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,984 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42387,1731961705932-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:28:25,999 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:28:25,999 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42387,1731961705932-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:25,999 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:26,000 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.Replication(171): c0a89b2656d4,42387,1731961705932 started 2024-11-18T20:28:26,013 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:28:26,013 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(1482): Serving as c0a89b2656d4,42387,1731961705932, RpcServer on c0a89b2656d4/172.17.0.2:42387, sessionid=0x1005485ae4d0002 2024-11-18T20:28:26,013 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:28:26,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;c0a89b2656d4:42387,5,FailOnTimeoutGroup] 2024-11-18T20:28:26,013 DEBUG [RS:1;c0a89b2656d4:42387 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0a89b2656d4,42387,1731961705932 2024-11-18T20:28:26,013 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,42387,1731961705932' 2024-11-18T20:28:26,013 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:28:26,013 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-18T20:28:26,014 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:28:26,014 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:28:26,014 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:28:26,014 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:28:26,014 DEBUG [RS:1;c0a89b2656d4:42387 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
c0a89b2656d4,42387,1731961705932 2024-11-18T20:28:26,014 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,42387,1731961705932' 2024-11-18T20:28:26,014 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:28:26,015 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:28:26,015 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is c0a89b2656d4,42449,1731961704945 2024-11-18T20:28:26,015 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4294e837 2024-11-18T20:28:26,015 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:28:26,015 DEBUG [RS:1;c0a89b2656d4:42387 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:28:26,015 INFO [RS:1;c0a89b2656d4:42387 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:28:26,015 INFO [RS:1;c0a89b2656d4:42387 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:28:26,017 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37014, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:28:26,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42449 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:28:26,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42449 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
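[Editor's note] The TableDescriptorChecker(321) warnings above fire because the region max file size (786432) and memstore flush size (8192) are far below production defaults, which log-rolling tests typically do on purpose. A sketch of the two standard keys involved; the class name is hypothetical and the values simply restate the ones in the warnings:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class TinyRegionLimitsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Small values like these trigger the MAX_FILESIZE / MEMSTORE_FLUSHSIZE warnings above,
    // but force frequent flushes and splits, which is usually the point in a rolling test.
    conf.setLong(HConstants.HREGION_MAX_FILESIZE, 786432L);        // hbase.hregion.max.filesize
    conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192L);   // hbase.hregion.memstore.flush.size
    System.out.println(conf.get(HConstants.HREGION_MAX_FILESIZE) + " / "
        + conf.get(HConstants.HREGION_MEMSTORE_FLUSH_SIZE));
  }
}
```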
2024-11-18T20:28:26,018 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42449 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:28:26,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42449 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T20:28:26,020 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:28:26,021 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:26,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42449 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-18T20:28:26,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42449 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:28:26,022 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:28:26,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41037 is added to blk_1073741835_1011 (size=393) 2024-11-18T20:28:26,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32861 is added to blk_1073741835_1011 (size=393) 2024-11-18T20:28:26,031 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 20e7148231b1ff05dfbd06fdd1de5020, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8 2024-11-18T20:28:26,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41037 is added to blk_1073741836_1012 (size=76) 2024-11-18T20:28:26,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32861 is added to blk_1073741836_1012 (size=76) 2024-11-18T20:28:26,038 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:26,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 20e7148231b1ff05dfbd06fdd1de5020, disabling compactions & flushes 2024-11-18T20:28:26,038 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:28:26,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:28:26,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. after waiting 0 ms 2024-11-18T20:28:26,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:28:26,038 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:28:26,038 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 20e7148231b1ff05dfbd06fdd1de5020: Waiting for close lock at 1731961706038Disabling compacts and flushes for region at 1731961706038Disabling writes for close at 1731961706038Writing region close event to WAL at 1731961706038Closed at 1731961706038 2024-11-18T20:28:26,040 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:28:26,040 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731961706040"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961706040"}]},"ts":"1731961706040"} 2024-11-18T20:28:26,043 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
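[Editor's note] The HMaster$4(2454) entry above records the client request that created 'TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' family (VERSIONS => '1', no compression or encoding), and the CreateTableProcedure entries that follow carry it through WRITE_FS_LAYOUT and ADD_TO_META. A sketch of the equivalent client-side call; the class name is hypothetical and this is not the test's actual code, just the public Admin/TableDescriptorBuilder API shaped after the logged descriptor:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // One 'info' family, one version, matching the descriptor printed in the create request above.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build())
          .build());
      // createTable blocks until the CreateTableProcedure (pid=4 above) completes.
    }
  }
}
```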
2024-11-18T20:28:26,044 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:28:26,044 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961706044"}]},"ts":"1731961706044"} 2024-11-18T20:28:26,047 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-18T20:28:26,047 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=20e7148231b1ff05dfbd06fdd1de5020, ASSIGN}] 2024-11-18T20:28:26,049 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=20e7148231b1ff05dfbd06fdd1de5020, ASSIGN 2024-11-18T20:28:26,050 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=20e7148231b1ff05dfbd06fdd1de5020, ASSIGN; state=OFFLINE, location=c0a89b2656d4,40455,1731961704988; forceNewPlan=false, retain=false 2024-11-18T20:28:26,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T20:28:26,119 INFO [RS:1;c0a89b2656d4:42387 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C42387%2C1731961705932, suffix=, logDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932, archiveDir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs, maxLogs=32 2024-11-18T20:28:26,121 INFO [RS:1;c0a89b2656d4:42387 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C42387%2C1731961705932.1731961706120 2024-11-18T20:28:26,130 INFO [RS:1;c0a89b2656d4:42387 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 2024-11-18T20:28:26,131 DEBUG [RS:1;c0a89b2656d4:42387 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34265:34265),(127.0.0.1/127.0.0.1:41523:41523)] 2024-11-18T20:28:26,202 INFO [c0a89b2656d4:42449 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-18T20:28:26,202 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=20e7148231b1ff05dfbd06fdd1de5020, regionState=OPENING, regionLocation=c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:26,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:26,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=20e7148231b1ff05dfbd06fdd1de5020, ASSIGN because future has completed 2024-11-18T20:28:26,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 20e7148231b1ff05dfbd06fdd1de5020, server=c0a89b2656d4,40455,1731961704988}] 2024-11-18T20:28:26,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:26,375 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:28:26,376 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 20e7148231b1ff05dfbd06fdd1de5020, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:28:26,377 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,377 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:28:26,377 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,377 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,380 INFO [StoreOpener-20e7148231b1ff05dfbd06fdd1de5020-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,382 INFO [StoreOpener-20e7148231b1ff05dfbd06fdd1de5020-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20e7148231b1ff05dfbd06fdd1de5020 columnFamilyName info 2024-11-18T20:28:26,382 DEBUG [StoreOpener-20e7148231b1ff05dfbd06fdd1de5020-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:28:26,383 INFO [StoreOpener-20e7148231b1ff05dfbd06fdd1de5020-1 {}] regionserver.HStore(327): Store=20e7148231b1ff05dfbd06fdd1de5020/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:28:26,383 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,384 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,384 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,385 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,385 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,388 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,391 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:28:26,391 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 20e7148231b1ff05dfbd06fdd1de5020; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730135, jitterRate=-0.07158616185188293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:28:26,391 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:26,392 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 20e7148231b1ff05dfbd06fdd1de5020: Running coprocessor pre-open hook at 1731961706378Writing region info on filesystem at 1731961706378Initializing all the Stores at 1731961706379 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961706379Cleaning up temporary data from old regions at 1731961706385 (+6 ms)Running coprocessor post-open hooks at 1731961706391 (+6 ms)Region opened successfully at 1731961706392 (+1 ms) 2024-11-18T20:28:26,393 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020., pid=6, masterSystemTime=1731961706364 2024-11-18T20:28:26,396 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:28:26,396 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:28:26,397 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=20e7148231b1ff05dfbd06fdd1de5020, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,40455,1731961704988 2024-11-18T20:28:26,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 20e7148231b1ff05dfbd06fdd1de5020, server=c0a89b2656d4,40455,1731961704988 because future has completed 2024-11-18T20:28:26,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T20:28:26,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 20e7148231b1ff05dfbd06fdd1de5020, server=c0a89b2656d4,40455,1731961704988 in 192 msec 2024-11-18T20:28:26,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T20:28:26,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=20e7148231b1ff05dfbd06fdd1de5020, ASSIGN in 357 msec 2024-11-18T20:28:26,408 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T20:28:26,408 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961706408"}]},"ts":"1731961706408"} 2024-11-18T20:28:26,410 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-18T20:28:26,412 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T20:28:26,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 394 msec 2024-11-18T20:28:26,729 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:28:26,742 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:26,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:26,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:26,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:31,614 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:28:31,620 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:31,642 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:31,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:31,644 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:28:31,652 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-18T20:28:36,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T20:28:36,106 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-18T20:28:36,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42449 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:28:36,117 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-18T20:28:36,117 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-18T20:28:36,121 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T20:28:36,121 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:28:36,135 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:36,139 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:36,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:36,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:36,139 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:28:36,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32740753{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:36,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c275cf8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:36,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1994569b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/java.io.tmpdir/jetty-localhost-42697-hadoop-hdfs-3_4_1-tests_jar-_-any-8567858882044928870/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:36,236 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5aaa7b2{HTTP/1.1, (http/1.1)}{localhost:42697} 2024-11-18T20:28:36,236 INFO [Time-limited test {}] server.Server(415): Started @114931ms 2024-11-18T20:28:36,237 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:28:36,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:36,271 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:36,272 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:36,272 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:36,272 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:28:36,272 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4067b708{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:36,272 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f9c997c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:36,300 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data5/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:36,300 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data6/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:36,319 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:28:36,323 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c3baa8218d1ac4 with lease ID 0x1b33d7deb1657944: Processing first storage report for DS-e9427e4b-7f48-4b93-a91a-51c2f507948b from datanode DatanodeRegistration(127.0.0.1:33063, datanodeUuid=4de92e37-2dec-4c9f-be23-bf4279aca5ed, infoPort=45477, infoSecurePort=0, ipcPort=41213, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:36,323 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c3baa8218d1ac4 with lease ID 0x1b33d7deb1657944: from storage DS-e9427e4b-7f48-4b93-a91a-51c2f507948b node DatanodeRegistration(127.0.0.1:33063, datanodeUuid=4de92e37-2dec-4c9f-be23-bf4279aca5ed, infoPort=45477, infoSecurePort=0, ipcPort=41213, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:36,324 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c3baa8218d1ac4 with lease ID 0x1b33d7deb1657944: Processing first storage report for DS-e6c175a1-acbc-4627-85f7-433a75941764 from datanode DatanodeRegistration(127.0.0.1:33063, datanodeUuid=4de92e37-2dec-4c9f-be23-bf4279aca5ed, infoPort=45477, infoSecurePort=0, ipcPort=41213, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:36,324 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c3baa8218d1ac4 with lease ID 0x1b33d7deb1657944: from storage DS-e6c175a1-acbc-4627-85f7-433a75941764 node DatanodeRegistration(127.0.0.1:33063, datanodeUuid=4de92e37-2dec-4c9f-be23-bf4279aca5ed, infoPort=45477, infoSecurePort=0, ipcPort=41213, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:36,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65fce847{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/java.io.tmpdir/jetty-localhost-41697-hadoop-hdfs-3_4_1-tests_jar-_-any-16431595574175382623/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:36,374 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69cce1cc{HTTP/1.1, (http/1.1)}{localhost:41697} 2024-11-18T20:28:36,374 INFO [Time-limited test {}] server.Server(415): Started @115069ms 2024-11-18T20:28:36,375 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:28:36,406 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:36,409 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:36,410 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:36,410 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:36,410 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:28:36,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ab2015c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:36,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47081358{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:36,438 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:36,438 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:36,461 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:28:36,464 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bf4300f5b34539 with lease ID 0x1b33d7deb1657945: Processing first storage report for DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc from datanode DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:36,464 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bf4300f5b34539 with lease ID 0x1b33d7deb1657945: from storage DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc node DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:36,464 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bf4300f5b34539 with lease ID 0x1b33d7deb1657945: Processing first storage report for DS-dead0f9c-d368-489e-8d5c-08a681882bd0 from datanode DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:36,464 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bf4300f5b34539 with lease ID 0x1b33d7deb1657945: from storage DS-dead0f9c-d368-489e-8d5c-08a681882bd0 node DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:36,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a43b168{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/java.io.tmpdir/jetty-localhost-34657-hadoop-hdfs-3_4_1-tests_jar-_-any-1579743118555958228/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:36,513 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9115f41{HTTP/1.1, (http/1.1)}{localhost:34657} 2024-11-18T20:28:36,513 INFO [Time-limited test {}] server.Server(415): Started @115208ms 2024-11-18T20:28:36,514 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-18T20:28:36,570 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data9/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:36,570 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data10/current/BP-151693428-172.17.0.2-1731961704338/current, will proceed with Du for space computation calculation, 2024-11-18T20:28:36,590 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:28:36,592 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38e8ffcb4956657f with lease ID 0x1b33d7deb1657946: Processing first storage report for DS-522b1c0c-a9c5-49a4-bb98-166b22753984 from datanode DatanodeRegistration(127.0.0.1:34125, datanodeUuid=371b1b6e-a372-49cb-8061-fe6efe1e70ae, infoPort=40687, infoSecurePort=0, ipcPort=37701, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:36,593 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38e8ffcb4956657f with lease ID 0x1b33d7deb1657946: from storage DS-522b1c0c-a9c5-49a4-bb98-166b22753984 node DatanodeRegistration(127.0.0.1:34125, datanodeUuid=371b1b6e-a372-49cb-8061-fe6efe1e70ae, infoPort=40687, infoSecurePort=0, ipcPort=37701, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:36,593 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38e8ffcb4956657f with lease ID 0x1b33d7deb1657946: Processing first storage report for DS-9a12bb7a-049c-45af-afcf-bd2315651dd0 from datanode DatanodeRegistration(127.0.0.1:34125, datanodeUuid=371b1b6e-a372-49cb-8061-fe6efe1e70ae, infoPort=40687, infoSecurePort=0, ipcPort=37701, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338) 2024-11-18T20:28:36,593 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38e8ffcb4956657f with lease ID 0x1b33d7deb1657946: from storage DS-9a12bb7a-049c-45af-afcf-bd2315651dd0 node DatanodeRegistration(127.0.0.1:34125, datanodeUuid=371b1b6e-a372-49cb-8061-fe6efe1e70ae, infoPort=40687, infoSecurePort=0, ipcPort=37701, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:36,632 WARN [ResponseProcessor for block BP-151693428-172.17.0.2-1731961704338:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-151693428-172.17.0.2-1731961704338:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,632 WARN [ResponseProcessor for block BP-151693428-172.17.0.2-1731961704338:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-151693428-172.17.0.2-1731961704338:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,632 WARN [ResponseProcessor for block BP-151693428-172.17.0.2-1731961704338:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-151693428-172.17.0.2-1731961704338:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,632 WARN [ResponseProcessor for block BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,633 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 block BP-151693428-172.17.0.2-1731961704338:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:36,633 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 block BP-151693428-172.17.0.2-1731961704338:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 
2024-11-18T20:28:36,633 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961705079 block BP-151693428-172.17.0.2-1731961704338:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:36,633 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta block BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:36,635 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-242235263_22 at /127.0.0.1:44078 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:32861:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44078 dst: /127.0.0.1:32861 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:36,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:54726 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54726 dst: /127.0.0.1:41037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:36,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787648513_22 at /127.0.0.1:54708 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54708 dst: /127.0.0.1:41037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:36,637 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2cc343c1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:36,636 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:44026 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32861:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44026 dst: /127.0.0.1:32861 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:36,635 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:44048 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32861:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44048 dst: /127.0.0.1:32861 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:36,635 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787648513_22 at /127.0.0.1:43992 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32861:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43992 dst: /127.0.0.1:32861 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:36,638 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40493c32{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:36,638 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:54732 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54732 dst: /127.0.0.1:41037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:36,638 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:36,638 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-242235263_22 at /127.0.0.1:54762 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54762 dst: /127.0.0.1:41037 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:36,638 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f1acac6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:36,639 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24afe84a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:36,640 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:28:36,640 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:28:36,640 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-151693428-172.17.0.2-1731961704338 (Datanode Uuid 0d4885e3-a96e-4f09-acbd-9e9639f9484d) service to localhost/127.0.0.1:39387 2024-11-18T20:28:36,640 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:28:36,641 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data3/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:36,641 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data4/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:36,642 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:28:36,642 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961705079 block BP-151693428-172.17.0.2-1731961704338:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,642 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 block BP-151693428-172.17.0.2-1731961704338:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset by peer at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,642 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 block BP-151693428-172.17.0.2-1731961704338:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,643 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787648513_22 at /127.0.0.1:44590 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44590 dst: /127.0.0.1:41037 java.io.IOException: The stream is closed at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:914) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:36,643 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta block BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,646 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@242079af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:36,647 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@adfdc3f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:36,647 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:36,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67450732{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:36,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@250f574a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:36,648 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:28:36,648 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:28:36,648 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-151693428-172.17.0.2-1731961704338 (Datanode Uuid 8208717c-d571-47cd-91a4-4f27e9bd4380) service to localhost/127.0.0.1:39387 2024-11-18T20:28:36,648 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:28:36,649 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data1/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:36,649 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data2/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:36,649 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:28:36,653 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020., hostname=c0a89b2656d4,40455,1731961704988, seqNum=2] 2024-11-18T20:28:36,654 ERROR [FSHLog-0-hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8-prefix:c0a89b2656d4,40455,1731961704988 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,654 WARN [FSHLog-0-hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8-prefix:c0a89b2656d4,40455,1731961704988 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,654 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,654 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C40455%2C1731961704988:(num 1731961705375) roll requested 2024-11-18T20:28:36,655 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40455%2C1731961704988.1731961716655 2024-11-18T20:28:36,660 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:36,660 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:36,660 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:36,661 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:36,661 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:36,661 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961716655 2024-11-18T20:28:36,661 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,661 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:36,662 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-18T20:28:36,663 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-18T20:28:36,663 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 2024-11-18T20:28:36,664 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40687:40687),(127.0.0.1/127.0.0.1:45477:45477)] 2024-11-18T20:28:36,664 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 is not closed yet, will try archiving it next time 2024-11-18T20:28:36,666 WARN [IPC Server handler 3 on default port 39387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-18T20:28:36,670 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 after 5ms 2024-11-18T20:28:37,059 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:37,985 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:38,665 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:38,666 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961716655 2024-11-18T20:28:38,667 WARN [ResponseProcessor for block BP-151693428-172.17.0.2-1731961704338:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-151693428-172.17.0.2-1731961704338:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:38,668 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961716655 block BP-151693428-172.17.0.2-1731961704338:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK], DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:28:38,668 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:49950 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49950 dst: /127.0.0.1:34125 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:38,669 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:33524 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33524 dst: /127.0.0.1:33063 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:38,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a43b168{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:38,673 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9115f41{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:38,673 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:38,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47081358{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:38,674 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ab2015c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:38,675 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:28:38,675 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:28:38,675 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-151693428-172.17.0.2-1731961704338 (Datanode Uuid 371b1b6e-a372-49cb-8061-fe6efe1e70ae) service to localhost/127.0.0.1:39387 2024-11-18T20:28:38,675 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:28:38,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data9/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:38,676 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data10/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:38,676 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:28:39,060 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:39,985 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:40,665 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:40,666 WARN [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]] 2024-11-18T20:28:40,666 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C40455%2C1731961704988:(num 1731961716655) roll requested 2024-11-18T20:28:40,666 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40455%2C1731961704988.1731961720666 2024-11-18T20:28:40,671 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 after 4008ms 2024-11-18T20:28:40,672 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:40,673 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:28:40,673 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741839_1021 2024-11-18T20:28:40,677 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:28:40,680 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:40,680 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:40,680 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741840_1022 2024-11-18T20:28:40,681 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK] 2024-11-18T20:28:40,681 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T20:28:40,682 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:40,682 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK], DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:28:40,682 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741841_1023 2024-11-18T20:28:40,683 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:28:40,687 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:40,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:40,687 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:40,687 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:40,687 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:40,688 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961716655 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961720666 2024-11-18T20:28:40,688 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45477:45477),(127.0.0.1/127.0.0.1:33687:33687)] 2024-11-18T20:28:40,688 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 is not closed yet, will try archiving it next time 2024-11-18T20:28:40,688 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961716655 is not closed yet, will try archiving it next time 2024-11-18T20:28:40,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33063 is added to blk_1073741838_1020 (size=3600) 2024-11-18T20:28:41,061 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:41,091 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 is not closed yet, will try archiving it next time 2024-11-18T20:28:41,986 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,684 WARN [ResponseProcessor for block BP-151693428-172.17.0.2-1731961704338:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-151693428-172.17.0.2-1731961704338:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,685 WARN [DataStreamer for file /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961720666 block BP-151693428-172.17.0.2-1731961704338:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:28:42,685 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:33816 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33816 dst: /127.0.0.1:33063 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:42,685 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57264 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57264 dst: /127.0.0.1:36623 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:42,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1994569b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:42,687 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5aaa7b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:28:42,687 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:28:42,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c275cf8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:28:42,688 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32740753{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,STOPPED} 2024-11-18T20:28:42,689 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:28:42,689 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:28:42,689 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,689 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-151693428-172.17.0.2-1731961704338 (Datanode Uuid 4de92e37-2dec-4c9f-be23-bf4279aca5ed) service to localhost/127.0.0.1:39387 2024-11-18T20:28:42,689 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:28:42,689 WARN [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]] 2024-11-18T20:28:42,689 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C40455%2C1731961704988:(num 1731961720666) roll requested 2024-11-18T20:28:42,690 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40455%2C1731961704988.1731961722689 2024-11-18T20:28:42,690 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data5/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:42,690 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data6/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:28:42,690 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:28:42,692 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,692 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK], DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:28:42,692 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741843_1026 2024-11-18T20:28:42,693 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:28:42,694 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,694 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:28:42,694 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741844_1027 2024-11-18T20:28:42,695 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:28:42,696 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,697 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:42,697 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741845_1028 2024-11-18T20:28:42,697 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK] 2024-11-18T20:28:42,699 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,699 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:28:42,699 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741846_1029 2024-11-18T20:28:42,700 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:28:42,701 WARN [IPC Server handler 0 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:28:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40455 {}] regionserver.HRegion(8855): Flush requested on 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:42,701 WARN [IPC Server handler 0 on default port 39387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:28:42,701 WARN [IPC Server handler 0 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:28:42,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7148231b1ff05dfbd06fdd1de5020 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:28:42,704 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:42,704 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:42,704 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:42,704 INFO [sync.3 
{}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:42,704 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:42,705 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961720666 with entries=6, filesize=6.11 KB; new WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961722689 2024-11-18T20:28:42,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741842_1025 (size=6261) 2024-11-18T20:28:42,707 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 is not closed yet, will try archiving it next time 2024-11-18T20:28:42,709 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33687:33687)] 2024-11-18T20:28:42,709 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 is not closed yet, will try archiving it next time 2024-11-18T20:28:42,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/77182a3c37694daa9169001084167e2b is 1080, key is row0002/info:/1731961718678/Put/seqid=0 2024-11-18T20:28:42,725 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,725 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 
2024-11-18T20:28:42,725 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741848_1031 2024-11-18T20:28:42,726 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:28:42,727 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,727 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:42,727 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741849_1032 2024-11-18T20:28:42,728 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK] 2024-11-18T20:28:42,729 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:28:42,729 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:28:42,729 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741850_1033 2024-11-18T20:28:42,729 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:28:42,731 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41037 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:42,731 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57290 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741851_1034 to mirror 127.0.0.1:41037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:42,731 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:28:42,731 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741851_1034 2024-11-18T20:28:42,732 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57290 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:28:42,732 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57290 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57290 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:42,732 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:28:42,733 WARN [IPC Server handler 1 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:28:42,733 WARN [IPC Server handler 1 on default port 39387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:28:42,733 WARN [IPC Server handler 1 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:28:42,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741852_1035 (size=10347) 2024-11-18T20:28:43,061 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:28:43,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/77182a3c37694daa9169001084167e2b 2024-11-18T20:28:43,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/77182a3c37694daa9169001084167e2b as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/77182a3c37694daa9169001084167e2b 2024-11-18T20:28:43,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/77182a3c37694daa9169001084167e2b, entries=5, sequenceid=11, filesize=10.1 K 2024-11-18T20:28:43,153 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 20e7148231b1ff05dfbd06fdd1de5020 in 452ms, sequenceid=11, compaction requested=false 2024-11-18T20:28:43,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 20e7148231b1ff05dfbd06fdd1de5020: 2024-11-18T20:28:43,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40455 {}] regionserver.HRegion(8855): Flush requested on 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:43,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7148231b1ff05dfbd06fdd1de5020 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-18T20:28:43,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/2423343a92da4539adc99566efb2bdc9 is 1080, key is row0007/info:/1731961722703/Put/seqid=0 2024-11-18T20:28:43,349 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:28:43,349 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:28:43,349 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741853_1036 2024-11-18T20:28:43,350 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:28:43,351 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:43,351 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:28:43,351 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741854_1037 2024-11-18T20:28:43,352 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:28:43,353 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:43,354 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:43,354 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741855_1038 2024-11-18T20:28:43,354 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK] 2024-11-18T20:28:43,355 WARN [Thread-930 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:43,355 WARN [Thread-930 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 
2024-11-18T20:28:43,355 WARN [Thread-930 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741856_1039 2024-11-18T20:28:43,356 WARN [Thread-930 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:28:43,356 WARN [IPC Server handler 1 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:28:43,357 WARN [IPC Server handler 1 on default port 39387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:28:43,357 WARN [IPC Server handler 1 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:28:43,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741857_1040 (size=12506) 2024-11-18T20:28:43,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/2423343a92da4539adc99566efb2bdc9 2024-11-18T20:28:43,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/2423343a92da4539adc99566efb2bdc9 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/2423343a92da4539adc99566efb2bdc9 2024-11-18T20:28:43,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/2423343a92da4539adc99566efb2bdc9, entries=7, sequenceid=24, filesize=12.2 K 2024-11-18T20:28:43,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 20e7148231b1ff05dfbd06fdd1de5020 in 434ms, sequenceid=24, compaction requested=false 2024-11-18T20:28:43,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
20e7148231b1ff05dfbd06fdd1de5020: 2024-11-18T20:28:43,775 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-18T20:28:43,775 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:43,775 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/2423343a92da4539adc99566efb2bdc9 because midkey is the same as first or last row 2024-11-18T20:28:43,986 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,710 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,710 WARN [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]] 2024-11-18T20:28:44,710 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C40455%2C1731961704988:(num 1731961722689) roll requested 2024-11-18T20:28:44,711 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40455%2C1731961704988.1731961724710 2024-11-18T20:28:44,716 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,716 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:44,716 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741858_1041 2024-11-18T20:28:44,717 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK] 2024-11-18T20:28:44,722 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,722 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57324 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741859_1042 to mirror 127.0.0.1:33063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:44,723 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:28:44,723 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741859_1042 2024-11-18T20:28:44,723 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57324 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T20:28:44,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57324 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57324 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:44,724 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:28:44,727 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41037 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57326 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741860_1043 to mirror 127.0.0.1:41037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:44,727 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:28:44,727 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57326 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T20:28:44,727 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741860_1043 2024-11-18T20:28:44,727 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57326 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57326 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:44,728 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:28:44,731 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34125 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,731 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57336 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741861_1044 to mirror 127.0.0.1:34125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:44,731 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:28:44,731 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741861_1044 2024-11-18T20:28:44,731 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57336 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T20:28:44,731 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57336 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57336 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:44,732 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:28:44,732 WARN [IPC Server handler 4 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:28:44,732 WARN [IPC Server handler 4 on default port 39387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:28:44,732 WARN [IPC Server handler 4 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:28:44,735 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:44,735 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:44,735 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:44,736 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:44,736 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:44,736 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961722689 with entries=18, filesize=18.21 KB; new WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961724710 2024-11-18T20:28:44,737 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33687:33687)] 2024-11-18T20:28:44,737 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 is not closed yet, will try archiving it next time 2024-11-18T20:28:44,737 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961722689 is not closed yet, will try archiving it next time 2024-11-18T20:28:44,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741847_1030 (size=18655) 2024-11-18T20:28:44,737 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961716655 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs/c0a89b2656d4%2C40455%2C1731961704988.1731961716655 2024-11-18T20:28:44,739 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961720666 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs/c0a89b2656d4%2C40455%2C1731961704988.1731961720666 2024-11-18T20:28:44,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40455 {}] regionserver.HRegion(8855): Flush requested on 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:44,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7148231b1ff05dfbd06fdd1de5020 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T20:28:44,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/a3f666badf3a4bed9762927302501554 is 1079, key is tmprow/info:/1731961724778/Put/seqid=0 2024-11-18T20:28:44,790 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,790 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:44,790 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741863_1046 2024-11-18T20:28:44,791 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK] 2024-11-18T20:28:44,792 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,792 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:28:44,792 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741864_1047 2024-11-18T20:28:44,793 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:28:44,795 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41037 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,795 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57356 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741865_1048 to mirror 127.0.0.1:41037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:44,796 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:28:44,796 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741865_1048 2024-11-18T20:28:44,796 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57356 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:28:44,796 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57356 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57356 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:44,796 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:28:44,799 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34125 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:44,798 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57370 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741866_1049 to mirror 127.0.0.1:34125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:44,799 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 
2024-11-18T20:28:44,799 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741866_1049 2024-11-18T20:28:44,799 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57370 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:28:44,799 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57370 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57370 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:44,800 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:28:44,800 WARN [IPC Server handler 2 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:28:44,801 WARN [IPC Server handler 2 on default port 39387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:28:44,801 WARN [IPC Server handler 2 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:28:44,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741867_1050 (size=6027) 2024-11-18T20:28:45,062 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:28:45,140 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 is not closed yet, will try archiving it next time 2024-11-18T20:28:45,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/a3f666badf3a4bed9762927302501554 2024-11-18T20:28:45,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/a3f666badf3a4bed9762927302501554 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/a3f666badf3a4bed9762927302501554 2024-11-18T20:28:45,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/a3f666badf3a4bed9762927302501554, entries=1, sequenceid=34, filesize=5.9 K 2024-11-18T20:28:45,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 20e7148231b1ff05dfbd06fdd1de5020 in 447ms, sequenceid=34, compaction requested=true 2024-11-18T20:28:45,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 20e7148231b1ff05dfbd06fdd1de5020: 2024-11-18T20:28:45,228 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-18T20:28:45,228 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:45,228 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/2423343a92da4539adc99566efb2bdc9 because midkey is the same as first or last row 2024-11-18T20:28:45,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e7148231b1ff05dfbd06fdd1de5020:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:28:45,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:28:45,229 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:28:45,230 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:28:45,230 DEBUG 
[RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HStore(1541): 20e7148231b1ff05dfbd06fdd1de5020/info is initiating minor compaction (all files) 2024-11-18T20:28:45,230 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 20e7148231b1ff05dfbd06fdd1de5020/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:28:45,231 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/77182a3c37694daa9169001084167e2b, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/2423343a92da4539adc99566efb2bdc9, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/a3f666badf3a4bed9762927302501554] into tmpdir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp, totalSize=28.2 K 2024-11-18T20:28:45,231 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.Compactor(225): Compacting 77182a3c37694daa9169001084167e2b, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731961718678 2024-11-18T20:28:45,232 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2423343a92da4539adc99566efb2bdc9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731961722703 2024-11-18T20:28:45,232 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.Compactor(225): Compacting a3f666badf3a4bed9762927302501554, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731961724778 2024-11-18T20:28:45,246 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e7148231b1ff05dfbd06fdd1de5020#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:28:45,247 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/d94e4ec07b4542459c443f3d24868ff8 is 1080, key is row0002/info:/1731961718678/Put/seqid=0 2024-11-18T20:28:45,248 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:45,249 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:28:45,249 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741868_1051 2024-11-18T20:28:45,249 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:28:45,251 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:45,251 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK], DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:28:45,251 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741869_1052 2024-11-18T20:28:45,251 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:28:45,254 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:32861 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:45,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57428 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741870_1053 to mirror 127.0.0.1:32861 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:45,254 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:45,254 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741870_1053 2024-11-18T20:28:45,254 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57428 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:28:45,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57428 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57428 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:45,255 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK] 2024-11-18T20:28:45,256 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:45,256 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 
2024-11-18T20:28:45,256 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741871_1054 2024-11-18T20:28:45,257 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:28:45,257 WARN [IPC Server handler 4 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:28:45,257 WARN [IPC Server handler 4 on default port 39387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:28:45,257 WARN [IPC Server handler 4 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:28:45,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741872_1055 (size=17994) 2024-11-18T20:28:45,676 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/d94e4ec07b4542459c443f3d24868ff8 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8 2024-11-18T20:28:45,684 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 20e7148231b1ff05dfbd06fdd1de5020/info of 20e7148231b1ff05dfbd06fdd1de5020 into d94e4ec07b4542459c443f3d24868ff8(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T20:28:45,685 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 20e7148231b1ff05dfbd06fdd1de5020: 2024-11-18T20:28:45,685 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020., storeName=20e7148231b1ff05dfbd06fdd1de5020/info, priority=13, startTime=1731961725228; duration=0sec 2024-11-18T20:28:45,685 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T20:28:45,685 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:45,685 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8 because midkey is the same as first or last row 2024-11-18T20:28:45,685 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T20:28:45,685 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:45,685 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8 because midkey is the same as first or last row 2024-11-18T20:28:45,686 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T20:28:45,686 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:45,686 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8 because midkey is the same as first or last row 2024-11-18T20:28:45,686 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:28:45,686 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e7148231b1ff05dfbd06fdd1de5020:info 2024-11-18T20:28:45,987 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:46,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40455 {}] regionserver.HRegion(8855): Flush requested on 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:28:46,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7148231b1ff05dfbd06fdd1de5020 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T20:28:46,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/7038f61944594000b93281ceb1e3d2a2 is 1079, key is tmprow/info:/1731961726206/Put/seqid=0 2024-11-18T20:28:46,221 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:46,221 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:28:46,221 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741873_1056 2024-11-18T20:28:46,222 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:28:46,224 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:46,224 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]) is bad. 2024-11-18T20:28:46,224 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741874_1057 2024-11-18T20:28:46,225 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:32861,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK] 2024-11-18T20:28:46,227 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34125 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:46,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57442 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741875_1058 to mirror 127.0.0.1:34125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:46,227 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:28:46,227 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741875_1058 2024-11-18T20:28:46,227 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57442 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:28:46,228 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:57442 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57442 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:46,228 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:28:46,229 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:46,230 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:28:46,230 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741876_1059 2024-11-18T20:28:46,230 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:28:46,231 WARN [IPC Server handler 0 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T20:28:46,231 WARN [IPC Server handler 0 on default port 39387 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T20:28:46,231 WARN [IPC Server handler 0 on default port 39387 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T20:28:46,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741877_1060 (size=6027) 2024-11-18T20:28:46,478 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@641d722a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338):Failed to transfer BP-151693428-172.17.0.2-1731961704338:blk_1073741842_1025 to 127.0.0.1:33063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:46,478 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@73f5fadf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338):Failed to transfer BP-151693428-172.17.0.2-1731961704338:blk_1073741852_1035 to 127.0.0.1:41037 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:46,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/7038f61944594000b93281ceb1e3d2a2 2024-11-18T20:28:46,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/7038f61944594000b93281ceb1e3d2a2 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/7038f61944594000b93281ceb1e3d2a2 2024-11-18T20:28:46,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/7038f61944594000b93281ceb1e3d2a2, entries=1, sequenceid=45, filesize=5.9 K 2024-11-18T20:28:46,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 20e7148231b1ff05dfbd06fdd1de5020 in 450ms, sequenceid=45, compaction requested=false 2024-11-18T20:28:46,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 20e7148231b1ff05dfbd06fdd1de5020: 2024-11-18T20:28:46,659 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-18T20:28:46,659 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:28:46,659 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8 because midkey is the same as first or last row 2024-11-18T20:28:46,738 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:46,738 WARN [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-18T20:28:46,828 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:28:46,832 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:28:46,833 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:28:46,833 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:28:46,833 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:28:46,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@91cf5b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:28:46,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@597eff8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:28:46,925 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d33cd9d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/java.io.tmpdir/jetty-localhost-43881-hadoop-hdfs-3_4_1-tests_jar-_-any-18345057633517961825/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:28:46,925 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70fc09c{HTTP/1.1, (http/1.1)}{localhost:43881} 2024-11-18T20:28:46,925 INFO [Time-limited test {}] server.Server(415): Started @125621ms 2024-11-18T20:28:46,927 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:28:47,001 WARN [Thread-977 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:28:47,008 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0abfea74b2220cd with lease ID 0x1b33d7deb1657947: from storage DS-a6a248e9-9f4d-411f-a6b1-573586317893 node DatanodeRegistration(127.0.0.1:38219, datanodeUuid=0d4885e3-a96e-4f09-acbd-9e9639f9484d, infoPort=35509, infoSecurePort=0, ipcPort=43905, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:47,009 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf0abfea74b2220cd with lease ID 0x1b33d7deb1657947: from storage DS-f4758b62-1e55-46da-9dae-2d88667e0522 node DatanodeRegistration(127.0.0.1:38219, datanodeUuid=0d4885e3-a96e-4f09-acbd-9e9639f9484d, infoPort=35509, infoSecurePort=0, ipcPort=43905, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:28:47,062 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:47,467 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@73f5fadf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338):Failed to transfer BP-151693428-172.17.0.2-1731961704338:blk_1073741857_1040 to 127.0.0.1:33063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:47,467 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@641d722a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338):Failed to transfer BP-151693428-172.17.0.2-1731961704338:blk_1073741847_1030 to 127.0.0.1:33063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:47,987 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:48,738 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:49,063 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:49,470 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@73f5fadf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338):Failed to transfer BP-151693428-172.17.0.2-1731961704338:blk_1073741872_1055 to 127.0.0.1:34125 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:49,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741867_1050 (size=6027) 2024-11-18T20:28:49,988 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:50,468 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@641d722a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338):Failed to transfer BP-151693428-172.17.0.2-1731961704338:blk_1073741877_1060 to 127.0.0.1:33063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:50,739 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:51,064 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:51,988 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:52,739 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:53,064 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:53,989 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:54,739 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:54,924 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:28:55,064 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:55,160 ERROR [FSHLog-0-hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData-prefix:c0a89b2656d4,42449,1731961704945 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:55,160 WARN [FSHLog-0-hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData-prefix:c0a89b2656d4,42449,1731961704945 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:55,160 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C42449%2C1731961704945:(num 1731961705079) roll requested 2024-11-18T20:28:55,161 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C42449%2C1731961704945.1731961735161 2024-11-18T20:28:55,169 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34125 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:28:55,169 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787648513_22 at /127.0.0.1:42768 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data4]'}, localName='127.0.0.1:38219', datanodeUuid='0d4885e3-a96e-4f09-acbd-9e9639f9484d', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741878_1061 to mirror 127.0.0.1:34125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:55,169 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38219,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:28:55,170 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741878_1061 2024-11-18T20:28:55,170 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_787648513_22 at /127.0.0.1:42768 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T20:28:55,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787648513_22 at /127.0.0.1:42768 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:38219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42768 dst: /127.0.0.1:38219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:55,170 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:28:55,172 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:55,172 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787648513_22 at /127.0.0.1:42770 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data4]'}, localName='127.0.0.1:38219', datanodeUuid='0d4885e3-a96e-4f09-acbd-9e9639f9484d', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741879_1062 to mirror 127.0.0.1:33063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:55,172 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38219,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:28:55,172 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741879_1062 2024-11-18T20:28:55,172 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_787648513_22 at /127.0.0.1:42770 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T20:28:55,172 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787648513_22 at /127.0.0.1:42770 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:38219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42770 dst: /127.0.0.1:38219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:28:55,173 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:28:55,174 WARN [Thread-998 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:28:55,174 WARN [Thread-998 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:28:55,174 WARN [Thread-998 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741880_1063 2024-11-18T20:28:55,175 WARN [Thread-998 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:28:55,179 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:55,179 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:55,179 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:55,179 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:55,179 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:28:55,179 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961705079 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961735161 2024-11-18T20:28:55,180 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:55,180 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:28:55,180 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961705079 2024-11-18T20:28:55,180 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35509:35509),(127.0.0.1/127.0.0.1:33687:33687)] 2024-11-18T20:28:55,181 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961705079 is not closed yet, will try archiving it next time 2024-11-18T20:28:55,181 WARN [IPC Server handler 3 on default port 39387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961705079 has not been closed. Lease recovery is in progress. RecoveryId = 1065 for block blk_1073741830_1006 2024-11-18T20:28:55,181 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961705079 after 1ms 2024-11-18T20:28:55,989 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:56,740 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:28:57,025 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5f0a58ff {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-151693428-172.17.0.2-1731961704338:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:41037,null,null]) java.net.ConnectException: Call From c0a89b2656d4/172.17.0.2 to localhost:39355 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T20:28:57,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741833_1019 (size=455) 2024-11-18T20:28:57,700 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs/c0a89b2656d4%2C40455%2C1731961704988.1731961705375 2024-11-18T20:28:57,703 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961722689 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs/c0a89b2656d4%2C40455%2C1731961704988.1731961722689 2024-11-18T20:28:57,990 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:58,009 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6e9fe2bf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38219, datanodeUuid=0d4885e3-a96e-4f09-acbd-9e9639f9484d, infoPort=35509, infoSecurePort=0, ipcPort=43905, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338):Failed to transfer BP-151693428-172.17.0.2-1731961704338:blk_1073741833_1019 to 127.0.0.1:41037 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:28:58,741 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:28:59,184 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/WALs/c0a89b2656d4,42449,1731961704945/c0a89b2656d4%2C42449%2C1731961704945.1731961705079 after 4003ms 2024-11-18T20:28:59,991 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:00,742 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:01,991 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,368 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40455%2C1731961704988.1731961742368 2024-11-18T20:29:02,376 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,376 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:38219,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:29:02,376 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741882_1066 2024-11-18T20:29:02,378 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:29:02,381 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,382 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 
2024-11-18T20:29:02,382 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741883_1067 2024-11-18T20:29:02,383 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:29:02,387 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,387 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,388 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,388 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,388 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,388 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961724710 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961742368 2024-11-18T20:29:02,389 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35509:35509),(127.0.0.1/127.0.0.1:33687:33687)] 2024-11-18T20:29:02,389 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961724710 is not closed yet, will try archiving it next time 2024-11-18T20:29:02,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741862_1045 (size=13591) 2024-11-18T20:29:02,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40455 {}] regionserver.HRegion(8855): Flush requested on 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:29:02,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7148231b1ff05dfbd06fdd1de5020 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T20:29:02,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/1faf3f8b67754419ba85b1bce4fe26a9 is 1080, key is row0013/info:/1731961742390/Put/seqid=0 2024-11-18T20:29:02,406 WARN [Thread-1018 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,406 WARN [Thread-1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:29:02,406 WARN [Thread-1018 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741885_1069 2024-11-18T20:29:02,407 WARN [Thread-1018 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:29:02,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45316 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741886_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741886_1070 to mirror 127.0.0.1:34125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,409 WARN [Thread-1018 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34125 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,409 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45316 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741886_1070] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:29:02,410 WARN [Thread-1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741886_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:29:02,410 WARN [Thread-1018 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741886_1070 2024-11-18T20:29:02,410 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45316 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741886_1070] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45316 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:29:02,410 WARN [Thread-1018 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:29:02,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741887_1071 (size=11421) 2024-11-18T20:29:02,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741887_1071 (size=11421) 2024-11-18T20:29:02,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/1faf3f8b67754419ba85b1bce4fe26a9 2024-11-18T20:29:02,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/1faf3f8b67754419ba85b1bce4fe26a9 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/1faf3f8b67754419ba85b1bce4fe26a9 2024-11-18T20:29:02,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/1faf3f8b67754419ba85b1bce4fe26a9, entries=6, sequenceid=55, filesize=11.2 K 2024-11-18T20:29:02,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 20e7148231b1ff05dfbd06fdd1de5020 in 34ms, sequenceid=55, compaction requested=true 2024-11-18T20:29:02,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 20e7148231b1ff05dfbd06fdd1de5020: 2024-11-18T20:29:02,433 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-18T20:29:02,433 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:29:02,433 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8 because midkey is the same as first or last row 2024-11-18T20:29:02,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 20e7148231b1ff05dfbd06fdd1de5020:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:29:02,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:29:02,433 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:29:02,435 DEBUG 
[RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:29:02,435 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HStore(1541): 20e7148231b1ff05dfbd06fdd1de5020/info is initiating minor compaction (all files) 2024-11-18T20:29:02,435 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 20e7148231b1ff05dfbd06fdd1de5020/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:29:02,435 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/7038f61944594000b93281ceb1e3d2a2, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/1faf3f8b67754419ba85b1bce4fe26a9] into tmpdir=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp, totalSize=34.6 K 2024-11-18T20:29:02,435 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.Compactor(225): Compacting d94e4ec07b4542459c443f3d24868ff8, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731961718678 2024-11-18T20:29:02,436 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7038f61944594000b93281ceb1e3d2a2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731961726206 2024-11-18T20:29:02,436 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1faf3f8b67754419ba85b1bce4fe26a9, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731961726612 2024-11-18T20:29:02,451 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 20e7148231b1ff05dfbd06fdd1de5020#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:29:02,452 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/ed605b9c60c54015aec43e9aa775ee31 is 1080, key is row0002/info:/1731961718678/Put/seqid=0 2024-11-18T20:29:02,454 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,454 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45348 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741888_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741888_1072 to mirror 127.0.0.1:33063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,454 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:29:02,454 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741888_1072 2024-11-18T20:29:02,455 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45348 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741888_1072] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-18T20:29:02,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45348 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741888_1072] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45348 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,455 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:29:02,457 WARN [Thread-1028 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34125 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,457 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45360 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741889_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741889_1073 to mirror 127.0.0.1:34125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,457 WARN [Thread-1028 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:29:02,457 WARN [Thread-1028 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741889_1073 2024-11-18T20:29:02,457 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45360 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741889_1073] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:29:02,457 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45360 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741889_1073] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45360 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:29:02,458 WARN [Thread-1028 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:29:02,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741890_1074 (size=23502) 2024-11-18T20:29:02,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741890_1074 (size=23502) 2024-11-18T20:29:02,470 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/ed605b9c60c54015aec43e9aa775ee31 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/ed605b9c60c54015aec43e9aa775ee31 2024-11-18T20:29:02,477 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 20e7148231b1ff05dfbd06fdd1de5020/info of 20e7148231b1ff05dfbd06fdd1de5020 into ed605b9c60c54015aec43e9aa775ee31(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:29:02,477 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 20e7148231b1ff05dfbd06fdd1de5020: 2024-11-18T20:29:02,478 INFO [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020., storeName=20e7148231b1ff05dfbd06fdd1de5020/info, priority=13, startTime=1731961742433; duration=0sec 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/ed605b9c60c54015aec43e9aa775ee31 because midkey is the same as first or last row 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/ed605b9c60c54015aec43e9aa775ee31 because midkey is the same as first or last row 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/ed605b9c60c54015aec43e9aa775ee31 because midkey is the same as first or last row 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:29:02,478 DEBUG [RS:0;c0a89b2656d4:40455-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 20e7148231b1ff05dfbd06fdd1de5020:info 2024-11-18T20:29:02,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40455 {}] regionserver.HRegion(8855): Flush requested on 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:29:02,620 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 20e7148231b1ff05dfbd06fdd1de5020 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T20:29:02,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/089c86f2e2534bbcb094d0b0291b772a is 1080, key is row0018/info:/1731961742400/Put/seqid=0 2024-11-18T20:29:02,629 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,630 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK], DatanodeInfoWithStorage[127.0.0.1:38219,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 
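Editor's aside: the DEBUG lines above repeat the split-policy evaluation after compaction and flush: the store totals 23.0 K against a 16.0 K check size, so the size test passes, but the file cannot actually be split because its midkey equals the first or last row. A minimal sketch of that size comparison follows; names and hard-coded values are illustrative stand-ins, and the real logic in HBase's ConstantSizeRegionSplitPolicy and StoreUtils is more involved.

// Minimal sketch of the "should split because region size is big enough" check seen
// above. Thresholds are the values reported in the log; everything else is illustrative.
public final class SplitCheckSketch {
  public static void main(String[] args) {
    long sumSizeBytes = 23L * 1024;      // total store size reported above (23.0 K)
    long sizeToCheckBytes = 16L * 1024;  // effective split threshold (16.0 K)
    boolean shouldSplit = sumSizeBytes > sizeToCheckBytes;
    // Even when the size check passes, a store file whose midkey equals its first or
    // last row yields no usable split point, so the region is not split.
    boolean midkeyUsable = false;        // what StoreUtils reports for this file
    System.out.println("shouldSplit=" + shouldSplit + ", canSplit=" + (shouldSplit && midkeyUsable));
  }
}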
2024-11-18T20:29:02,630 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741891_1075 2024-11-18T20:29:02,630 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:29:02,632 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,632 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK], DatanodeInfoWithStorage[127.0.0.1:38219,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:29:02,632 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741892_1076 2024-11-18T20:29:02,633 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:29:02,634 WARN [Thread-1036 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
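Editor's aside: the run of createBlockOutputStream failures above is expected in this test (TestLogRolling-testLogRollOnDatanodeDeath): datanodes have been taken down, so each attempted pipeline containing a dead node is abandoned and the node excluded until only live replicas remain. A hedged sketch of how a test can inject that fault with MiniDFSCluster follows; the cluster sizing and surrounding harness are assumptions, and only the stopDataNode call reflects the kind of fault seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Illustrative sketch only: stop one datanode in a test MiniDFSCluster so that write
// pipelines through it start failing, as in the log above. Not the actual test code.
public final class DatanodeDeathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
      cluster.waitActive();
      // Take down datanode 0; in-flight pipelines through it will see
      // "Connection refused" and exclude it during pipeline recovery.
      cluster.stopDataNode(0);
    } finally {
      cluster.shutdown();
    }
  }
}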
2024-11-18T20:29:02,634 WARN [Thread-1036 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:38219,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:29:02,634 WARN [Thread-1036 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741893_1077 2024-11-18T20:29:02,635 WARN [Thread-1036 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:29:02,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741894_1078 (size=11421) 2024-11-18T20:29:02,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741894_1078 (size=11421) 2024-11-18T20:29:02,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/089c86f2e2534bbcb094d0b0291b772a 2024-11-18T20:29:02,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/.tmp/info/089c86f2e2534bbcb094d0b0291b772a as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/089c86f2e2534bbcb094d0b0291b772a 2024-11-18T20:29:02,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/089c86f2e2534bbcb094d0b0291b772a, entries=6, sequenceid=66, filesize=11.2 K 2024-11-18T20:29:02,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 20e7148231b1ff05dfbd06fdd1de5020 in 39ms, sequenceid=66, compaction requested=false 2024-11-18T20:29:02,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 20e7148231b1ff05dfbd06fdd1de5020: 2024-11-18T20:29:02,660 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-11-18T20:29:02,660 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:29:02,660 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/ed605b9c60c54015aec43e9aa775ee31 because midkey is the same as first or last row 2024-11-18T20:29:02,743 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All 
datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,743 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-18T20:29:02,792 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.1731961724710 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs/c0a89b2656d4%2C40455%2C1731961704988.1731961724710 2024-11-18T20:29:02,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:29:02,822 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:29:02,823 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at 
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:29:02,823 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:02,824 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:02,824 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T20:29:02,824 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:29:02,824 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1048628396, stopped=false 2024-11-18T20:29:02,825 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c0a89b2656d4,42449,1731961704945 2024-11-18T20:29:02,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:29:02,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:29:02,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:02,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:02,829 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:29:02,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42387-0x1005485ae4d0002, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:29:02,829 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
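Editor's aside: the call stack above shows the shutdown originating in AbstractTestLogRolling.tearDown via HBaseTestingUtil.shutdownMiniCluster, which also closes the shared async connection. A minimal sketch of that teardown shape follows; the TEST_UTIL field and its setup elsewhere in the class are assumptions, and only the shutdownMiniCluster() call mirrors the log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

// Sketch of the teardown pattern implied by the call stack above.
public class LogRollingTearDownSketch {
  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    // Closes the shared connection and stops the HBase and DFS mini clusters.
    TEST_UTIL.shutdownMiniCluster();
  }
}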
2024-11-18T20:29:02,829 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:29:02,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:02,830 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:42387-0x1005485ae4d0002, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:02,830 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0a89b2656d4,40455,1731961704988' ***** 2024-11-18T20:29:02,830 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:29:02,830 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0a89b2656d4,42387,1731961705932' ***** 2024-11-18T20:29:02,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:29:02,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42387-0x1005485ae4d0002, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:29:02,830 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:29:02,830 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:29:02,831 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:29:02,831 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:29:02,831 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:29:02,831 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:29:02,831 INFO [RS:1;c0a89b2656d4:42387 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:29:02,831 INFO [RS:0;c0a89b2656d4:40455 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:29:02,831 INFO [RS:1;c0a89b2656d4:42387 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:29:02,831 INFO [RS:0;c0a89b2656d4:40455 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:29:02,831 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(959): stopping server c0a89b2656d4,42387,1731961705932 2024-11-18T20:29:02,831 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(3091): Received CLOSE for 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:29:02,831 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:29:02,832 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(959): stopping server c0a89b2656d4,40455,1731961704988 2024-11-18T20:29:02,832 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:29:02,832 INFO [RS:1;c0a89b2656d4:42387 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c0a89b2656d4:42387. 2024-11-18T20:29:02,832 INFO [RS:0;c0a89b2656d4:40455 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c0a89b2656d4:40455. 
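Editor's aside: the ZKWatcher entries show the master and both region servers reacting to the deletion of /hbase/running and re-setting watchers on the now-absent znode, which is what turns the shutdown request into "STOPPING region server". A generic sketch of watching a znode for deletion with the plain ZooKeeper client follows; the connect string, timeout, and class are illustrative assumptions, not HBase's internal ZKWatcher.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Generic sketch: block until a znode such as /hbase/running is deleted. A plain
// ZooKeeper-client illustration of the event seen above, not HBase's ZKWatcher.
public final class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        deleted.countDown(); // cluster shutdown signal: the "running" znode is gone
      }
    };
    // exists() sets a one-shot watch whether or not the znode is currently present.
    zk.exists("/hbase/running", watcher);
    deleted.await();
    zk.close();
  }
}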
2024-11-18T20:29:02,832 DEBUG [RS:1;c0a89b2656d4:42387 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:29:02,832 DEBUG [RS:1;c0a89b2656d4:42387 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:02,832 DEBUG [RS:0;c0a89b2656d4:40455 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:29:02,832 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 20e7148231b1ff05dfbd06fdd1de5020, disabling compactions & flushes 2024-11-18T20:29:02,832 DEBUG [RS:0;c0a89b2656d4:40455 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:02,832 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(976): stopping server c0a89b2656d4,42387,1731961705932; all regions closed. 
2024-11-18T20:29:02,832 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:29:02,832 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:29:02,832 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:29:02,832 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:29:02,832 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. after waiting 0 ms 2024-11-18T20:29:02,832 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:29:02,832 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:29:02,832 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:29:02,832 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,832 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T20:29:02,832 DEBUG [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 20e7148231b1ff05dfbd06fdd1de5020=TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.} 2024-11-18T20:29:02,832 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,832 DEBUG [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 20e7148231b1ff05dfbd06fdd1de5020 2024-11-18T20:29:02,832 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,832 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:29:02,832 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,832 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/77182a3c37694daa9169001084167e2b, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/2423343a92da4539adc99566efb2bdc9, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8, 
hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/a3f666badf3a4bed9762927302501554, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/7038f61944594000b93281ceb1e3d2a2, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/1faf3f8b67754419ba85b1bce4fe26a9] to archive 2024-11-18T20:29:02,833 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,833 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:29:02,833 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:29:02,833 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:29:02,833 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:29:02,833 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,833 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-18T20:29:02,833 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
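Editor's aside: the StoreCloser thread above hands the compacted store files to HFileArchiver, which relocates them under the archive directory rather than deleting them outright. A hedged sketch of that "move into a mirrored archive path" idea using the plain FileSystem API follows; the paths are placeholders and HFileArchiver itself does considerably more bookkeeping than this.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: move a compacted store file into a mirrored location under an
// archive directory, in the spirit of the HFileArchiver entries above. All paths are
// placeholders, not taken from this log.
public final class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path storeFile = new Path("/hbase/data/default/t1/region1/info/hfile1");
    Path archived  = new Path("/hbase/archive/data/default/t1/region1/info/hfile1");
    fs.mkdirs(archived.getParent());        // ensure the archive directory exists
    boolean moved = fs.rename(storeFile, archived);
    System.out.println("archived=" + moved);
  }
}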
2024-11-18T20:29:02,833 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 2024-11-18T20:29:02,833 ERROR [FSHLog-0-hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8-prefix:c0a89b2656d4,40455,1731961704988.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,833 WARN [FSHLog-0-hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8-prefix:c0a89b2656d4,40455,1731961704988.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,834 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C40455%2C1731961704988.meta:.meta(num 1731961705793) roll requested 2024-11-18T20:29:02,834 WARN [IPC Server handler 4 on default port 39387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 has not been closed. Lease recovery is in progress. RecoveryId = 1079 for block blk_1073741837_1013 2024-11-18T20:29:02,834 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C40455%2C1731961704988.meta.1731961742834.meta 2024-11-18T20:29:02,834 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
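Editor's aside: RecoverLeaseFSUtils is reclaiming the write lease on the abandoned WAL file; the NameNode reports that lease recovery is in progress and, as the next entry shows, the first attempt fails and is retried. A simplified sketch of driving lease recovery directly through the HDFS client API follows; the path and the retry loop are illustrative assumptions, simpler than what RecoverLeaseFSUtils actually does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Simplified sketch of WAL lease recovery: ask the NameNode to recover the lease and
// poll until it reports the file closed. Path and timing are illustrative.
public final class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("lease recovery needs HDFS");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path wal = new Path("/hbase/WALs/server1/example-wal-file");
    boolean recovered = dfs.recoverLease(wal);   // may need several attempts
    while (!recovered && !dfs.isFileClosed(wal)) {
      Thread.sleep(1000);                        // back off, then re-check
      recovered = dfs.recoverLease(wal);
    }
    System.out.println("lease recovered for " + wal);
  }
}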
2024-11-18T20:29:02,834 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 after 1ms 2024-11-18T20:29:02,836 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/77182a3c37694daa9169001084167e2b to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/77182a3c37694daa9169001084167e2b 2024-11-18T20:29:02,836 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,836 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:38219,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 
2024-11-18T20:29:02,836 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741895_1080 2024-11-18T20:29:02,837 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:29:02,838 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/2423343a92da4539adc99566efb2bdc9 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/2423343a92da4539adc99566efb2bdc9 2024-11-18T20:29:02,838 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,838 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 
2024-11-18T20:29:02,838 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741896_1081 2024-11-18T20:29:02,839 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:29:02,839 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/d94e4ec07b4542459c443f3d24868ff8 2024-11-18T20:29:02,840 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,840 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK], DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 
2024-11-18T20:29:02,840 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741897_1082 2024-11-18T20:29:02,840 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/a3f666badf3a4bed9762927302501554 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/a3f666badf3a4bed9762927302501554 2024-11-18T20:29:02,841 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:29:02,842 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/7038f61944594000b93281ceb1e3d2a2 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/7038f61944594000b93281ceb1e3d2a2 2024-11-18T20:29:02,843 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/1faf3f8b67754419ba85b1bce4fe26a9 to hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/info/1faf3f8b67754419ba85b1bce4fe26a9 2024-11-18T20:29:02,844 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c0a89b2656d4:42449 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T20:29:02,844 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [77182a3c37694daa9169001084167e2b=10347, 2423343a92da4539adc99566efb2bdc9=12506, d94e4ec07b4542459c443f3d24868ff8=17994, a3f666badf3a4bed9762927302501554=6027, 7038f61944594000b93281ceb1e3d2a2=6027, 1faf3f8b67754419ba85b1bce4fe26a9=11421] 2024-11-18T20:29:02,845 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,845 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,845 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,845 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,845 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:02,846 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961742834.meta 2024-11-18T20:29:02,849 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,849 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
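Editor's aside: the log roller above reports the meta WAL successfully rolled to a new file once a working pipeline was found, even while the old writer fails to close cleanly. Outside the automatic LowReplication-Roller, a roll can also be requested explicitly through the client Admin API; the sketch below is hedged, with the connection setup and the ServerName value as assumptions rather than servers from this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch: explicitly ask a region server to roll its WAL writer, the
// operation the log roller performs automatically above. The server name is a placeholder.
public final class RollWalSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ServerName rs = ServerName.valueOf("example-host", 16020, System.currentTimeMillis());
      admin.rollWALWriter(rs);   // forces the region server to start a new WAL file
    }
  }
}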
2024-11-18T20:29:02,849 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta 2024-11-18T20:29:02,849 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/default/TestLogRolling-testLogRollOnDatanodeDeath/20e7148231b1ff05dfbd06fdd1de5020/recovered.edits/69.seqid, newMaxSeqId=69, maxSeqId=1 2024-11-18T20:29:02,850 WARN [IPC Server handler 4 on default port 39387 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta has not been closed. Lease recovery is in progress. RecoveryId = 1084 for block blk_1073741834_1010 2024-11-18T20:29:02,850 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta after 1ms 2024-11-18T20:29:02,850 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 2024-11-18T20:29:02,850 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 20e7148231b1ff05dfbd06fdd1de5020: Waiting for close lock at 1731961742832Running coprocessor pre-close hooks at 1731961742832Disabling compacts and flushes for region at 1731961742832Disabling writes for close at 1731961742832Writing region close event to WAL at 1731961742845 (+13 ms)Running coprocessor post-close hooks at 1731961742850 (+5 ms)Closed at 1731961742850 2024-11-18T20:29:02,851 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020. 
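The lease-recovery messages above (attempt=0 failing, then the NameNode reporting "Lease recovery is in progress") follow the usual recover-then-retry pattern. A simplified sketch using the public DistributedFileSystem.recoverLease() API rather than HBase's internal RecoverLeaseFSUtils; the NameNode URI matches the log, while the WAL path and the roughly 4-second retry interval are illustrative stand-ins:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Simplified sketch of the lease-recovery loop logged above. recoverLease()
// returns false while the NameNode is still recovering the last block (the
// "Lease recovery is in progress" warning) and true once the file is closed.
public class RecoverWalLease {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical WAL path for illustration; pass a real one as args[0].
    Path wal = new Path(args.length > 0 ? args[0] : "/WALs/example-regionserver/example.meta");

    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39387"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      int attempt = 0;
      while (!dfs.recoverLease(wal)) {
        System.out.println("Failed to recover lease, attempt=" + attempt++ + " on file=" + wal);
        Thread.sleep(4000L); // back off between attempts; ~4s matches the gap seen in this log
      }
      System.out.println("Lease recovered; " + wal + " is now closed");
    }
  }
}
```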
2024-11-18T20:29:02,853 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35509:35509),(127.0.0.1/127.0.0.1:33687:33687)] 2024-11-18T20:29:02,853 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta is not closed yet, will try archiving it next time 2024-11-18T20:29:02,869 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/.tmp/info/d78fb4da353a476d85e98dc0c25b388a is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731961706017.20e7148231b1ff05dfbd06fdd1de5020./info:regioninfo/1731961706397/Put/seqid=0 2024-11-18T20:29:02,871 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,871 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:29:02,871 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741899_1085 2024-11-18T20:29:02,872 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:29:02,874 WARN [Thread-1049 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41037 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,874 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45390 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741900_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741900_1086 to mirror 127.0.0.1:41037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,874 WARN [Thread-1049 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:29:02,874 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45390 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741900_1086] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:29:02,874 WARN [Thread-1049 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741900_1086 2024-11-18T20:29:02,874 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45390 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741900_1086] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45390 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,875 WARN [Thread-1049 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:29:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741901_1087 (size=7089) 2024-11-18T20:29:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741901_1087 (size=7089) 2024-11-18T20:29:02,884 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/.tmp/info/d78fb4da353a476d85e98dc0c25b388a 2024-11-18T20:29:02,904 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/.tmp/ns/fe6075833a384e989043e0e5e38be911 is 43, key is default/ns:d/1731961705860/Put/seqid=0 2024-11-18T20:29:02,907 WARN [Thread-1056 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41037 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
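The repeated "Excluding datanode" and "Abandoning ... blk_..." warnings above happen because the NameNode keeps handing out the already-killed datanode 127.0.0.1:41037 in new pipelines until it notices the node is gone. A small diagnostic sketch, assuming only the NameNode address seen in the log, that asks the NameNode which datanodes it currently considers live or dead:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

// Diagnostic sketch: print the NameNode's view of live and dead datanodes, to
// cross-check pipeline exclusions like the ones above. Only the NameNode URI
// is taken from the log; everything else is illustrative.
public class DatanodeReport {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39387"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
        System.out.println("LIVE " + dn.getXferAddr());
      }
      for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.DEAD)) {
        System.out.println("DEAD " + dn.getXferAddr());
      }
    }
  }
}
```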
2024-11-18T20:29:02,907 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:60820 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741902_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data4]'}, localName='127.0.0.1:38219', datanodeUuid='0d4885e3-a96e-4f09-acbd-9e9639f9484d', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741902_1088 to mirror 127.0.0.1:41037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,907 WARN [Thread-1056 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38219,DS-a6a248e9-9f4d-411f-a6b1-573586317893,DISK], DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK]) is bad. 2024-11-18T20:29:02,907 WARN [Thread-1056 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741902_1088 2024-11-18T20:29:02,907 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:60820 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741902_1088] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:29:02,908 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:60820 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741902_1088] {}] datanode.DataXceiver(331): 127.0.0.1:38219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60820 dst: /127.0.0.1:38219 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,908 WARN [Thread-1056 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41037,DS-f975aecd-40cf-4983-a42a-d1a0da49802a,DISK] 2024-11-18T20:29:02,909 WARN [Thread-1056 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:02,909 WARN [Thread-1056 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK]) is bad. 2024-11-18T20:29:02,909 WARN [Thread-1056 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741903_1089 2024-11-18T20:29:02,910 WARN [Thread-1056 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33063,DS-e9427e4b-7f48-4b93-a91a-51c2f507948b,DISK] 2024-11-18T20:29:02,912 WARN [Thread-1056 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34125 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:29:02,912 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45414 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741904_1090] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8]'}, localName='127.0.0.1:36623', datanodeUuid='a176c638-4aff-4918-9728-beb54f865e3c', xmitsInProgress=0}:Exception transferring block BP-151693428-172.17.0.2-1731961704338:blk_1073741904_1090 to mirror 127.0.0.1:34125 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,912 WARN [Thread-1056 {}] hdfs.DataStreamer(1731): Error Recovery for BP-151693428-172.17.0.2-1731961704338:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36623,DS-0da080f6-d713-489b-8bb0-5d76df2ef3cc,DISK], DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK]) is bad. 2024-11-18T20:29:02,912 WARN [Thread-1056 {}] hdfs.DataStreamer(1850): Abandoning BP-151693428-172.17.0.2-1731961704338:blk_1073741904_1090 2024-11-18T20:29:02,912 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45414 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741904_1090] {}] datanode.BlockReceiver(316): Block 1073741904 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T20:29:02,913 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1262743641_22 at /127.0.0.1:45414 [Receiving block BP-151693428-172.17.0.2-1731961704338:blk_1073741904_1090] {}] datanode.DataXceiver(331): 127.0.0.1:36623:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45414 dst: /127.0.0.1:36623 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:02,913 WARN [Thread-1056 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34125,DS-522b1c0c-a9c5-49a4-bb98-166b22753984,DISK] 2024-11-18T20:29:02,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741905_1091 (size=5153) 2024-11-18T20:29:02,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741905_1091 (size=5153) 2024-11-18T20:29:02,918 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/.tmp/ns/fe6075833a384e989043e0e5e38be911 2024-11-18T20:29:02,937 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/.tmp/table/2311b183b0d847c28311f43fb174beaa is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731961706408/Put/seqid=0 2024-11-18T20:29:02,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741906_1092 (size=5424) 2024-11-18T20:29:02,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741906_1092 (size=5424) 2024-11-18T20:29:02,942 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/.tmp/table/2311b183b0d847c28311f43fb174beaa 2024-11-18T20:29:02,948 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/.tmp/info/d78fb4da353a476d85e98dc0c25b388a as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/info/d78fb4da353a476d85e98dc0c25b388a 2024-11-18T20:29:02,953 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/info/d78fb4da353a476d85e98dc0c25b388a, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T20:29:02,954 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/.tmp/ns/fe6075833a384e989043e0e5e38be911 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/ns/fe6075833a384e989043e0e5e38be911 2024-11-18T20:29:02,960 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/ns/fe6075833a384e989043e0e5e38be911, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T20:29:02,961 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/.tmp/table/2311b183b0d847c28311f43fb174beaa as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/table/2311b183b0d847c28311f43fb174beaa 2024-11-18T20:29:02,967 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/table/2311b183b0d847c28311f43fb174beaa, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T20:29:02,968 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false 2024-11-18T20:29:02,972 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T20:29:02,973 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:29:02,973 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:29:02,973 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961742832Running coprocessor pre-close hooks at 1731961742832Disabling compacts and flushes for region at 1731961742832Disabling writes for close at 1731961742833 (+1 ms)Obtaining lock to block concurrent updates at 1731961742833Preparing flush snapshotting stores in 1588230740 at 1731961742833Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731961742833Flushing stores of hbase:meta,,1.1588230740 at 1731961742854 (+21 ms)Flushing 1588230740/info: creating writer at 1731961742854Flushing 1588230740/info: appending metadata at 1731961742868 (+14 ms)Flushing 1588230740/info: closing flushed file at 1731961742868Flushing 1588230740/ns: creating writer at 1731961742890 (+22 ms)Flushing 1588230740/ns: appending metadata at 1731961742903 (+13 ms)Flushing 1588230740/ns: closing flushed file at 
1731961742903Flushing 1588230740/table: creating writer at 1731961742923 (+20 ms)Flushing 1588230740/table: appending metadata at 1731961742937 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731961742937Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a74b4e8: reopening flushed file at 1731961742947 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9885401: reopening flushed file at 1731961742953 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79536cc9: reopening flushed file at 1731961742960 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false at 1731961742968 (+8 ms)Writing region close event to WAL at 1731961742969 (+1 ms)Running coprocessor post-close hooks at 1731961742973 (+4 ms)Closed at 1731961742973 2024-11-18T20:29:02,973 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:29:03,033 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(976): stopping server c0a89b2656d4,40455,1731961704988; all regions closed. 2024-11-18T20:29:03,034 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:03,034 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:03,034 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:03,034 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:03,034 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:03,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741898_1083 (size=825) 2024-11-18T20:29:03,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741898_1083 (size=825) 2024-11-18T20:29:03,053 INFO [regionserver/c0a89b2656d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T20:29:03,053 INFO [regionserver/c0a89b2656d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T20:29:03,241 INFO [regionserver/c0a89b2656d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T20:29:03,241 INFO [regionserver/c0a89b2656d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T20:29:03,243 INFO [regionserver/c0a89b2656d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:29:03,987 INFO [regionserver/c0a89b2656d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:29:04,472 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@641d722a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36623, datanodeUuid=a176c638-4aff-4918-9728-beb54f865e3c, infoPort=33687, infoSecurePort=0, ipcPort=35275, storageInfo=lv=-57;cid=testClusterID;nsid=1119231294;c=1731961704338):Failed to transfer BP-151693428-172.17.0.2-1731961704338:blk_1073741862_1045 to 127.0.0.1:41037 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:05,876 INFO [master/c0a89b2656d4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T20:29:05,876 INFO [master/c0a89b2656d4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T20:29:06,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T20:29:06,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:29:06,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:29:06,836 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 after 4003ms 2024-11-18T20:29:06,852 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta after 4003ms 2024-11-18T20:29:07,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741835_1011 (size=393) 2024-11-18T20:29:07,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:29:07,032 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4a1a384a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-151693428-172.17.0.2-1731961704338:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:41037,null,null]) java.net.ConnectException: Call From c0a89b2656d4/172.17.0.2 to localhost:39355 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-18T20:29:07,833 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T20:29:07,839 DEBUG [RS:1;c0a89b2656d4:42387 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs 2024-11-18T20:29:07,840 INFO [RS:1;c0a89b2656d4:42387 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C42387%2C1731961705932:(num 1731961706120) 2024-11-18T20:29:07,840 DEBUG [RS:1;c0a89b2656d4:42387 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:07,840 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:29:07,840 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:29:07,841 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.ChoreService(370): Chore service for: regionserver/c0a89b2656d4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:29:07,841 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:29:07,841 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:29:07,841 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:29:07,841 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:29:07,842 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:29:07,842 INFO [RS:1;c0a89b2656d4:42387 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42387 2024-11-18T20:29:07,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42387-0x1005485ae4d0002, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0a89b2656d4,42387,1731961705932 2024-11-18T20:29:07,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:29:07,845 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:29:07,847 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0a89b2656d4,42387,1731961705932] 2024-11-18T20:29:07,848 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0a89b2656d4,42387,1731961705932 already deleted, retry=false 2024-11-18T20:29:07,848 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0a89b2656d4,42387,1731961705932 expired; onlineServers=1 2024-11-18T20:29:07,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:07,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:07,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:07,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:07,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:07,872 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:07,879 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:07,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:07,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42387-0x1005485ae4d0002, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:07,947 INFO [RS:1;c0a89b2656d4:42387 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:29:07,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42387-0x1005485ae4d0002, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:07,947 INFO [RS:1;c0a89b2656d4:42387 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0a89b2656d4,42387,1731961705932; zookeeper connection closed. 
2024-11-18T20:29:07,948 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7c44c8c7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7c44c8c7 2024-11-18T20:29:08,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:29:08,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:29:08,035 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T20:29:08,042 DEBUG [RS:0;c0a89b2656d4:40455 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs 2024-11-18T20:29:08,042 INFO [RS:0;c0a89b2656d4:40455 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C40455%2C1731961704988.meta:.meta(num 1731961742834) 2024-11-18T20:29:08,043 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:08,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:08,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:08,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:08,044 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:08,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741884_1068 (size=16308) 2024-11-18T20:29:08,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741884_1068 (size=16308) 2024-11-18T20:29:08,052 DEBUG [RS:0;c0a89b2656d4:40455 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/oldWALs 2024-11-18T20:29:08,052 INFO [RS:0;c0a89b2656d4:40455 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C40455%2C1731961704988:(num 1731961742368) 2024-11-18T20:29:08,052 DEBUG [RS:0;c0a89b2656d4:40455 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:08,052 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:29:08,052 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:29:08,053 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.ChoreService(370): Chore service for: regionserver/c0a89b2656d4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T20:29:08,053 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:29:08,053 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
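The WAL-Shutdown-0 errors above explicitly point at hbase.wal.fshlog.wait.on.shutdown.seconds (the 5-second wait they mention). A minimal sketch of raising that wait on an HBase configuration; 30 seconds is an illustrative value, not a recommendation taken from this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch: raise the WAL shutdown wait named in the error above.
// The log shows the 5-second default being exceeded; 30 is illustrative.
public class WalShutdownWait {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    System.out.println("hbase.wal.fshlog.wait.on.shutdown.seconds = "
        + conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
  }
}
```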
2024-11-18T20:29:08,054 INFO [RS:0;c0a89b2656d4:40455 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40455 2024-11-18T20:29:08,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0a89b2656d4,40455,1731961704988 2024-11-18T20:29:08,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:29:08,055 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:29:08,057 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0a89b2656d4,40455,1731961704988] 2024-11-18T20:29:08,058 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0a89b2656d4,40455,1731961704988 already deleted, retry=false 2024-11-18T20:29:08,058 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0a89b2656d4,40455,1731961704988 expired; onlineServers=0 2024-11-18T20:29:08,058 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c0a89b2656d4,42449,1731961704945' ***** 2024-11-18T20:29:08,058 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:29:08,058 INFO [M:0;c0a89b2656d4:42449 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:29:08,058 INFO [M:0;c0a89b2656d4:42449 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:29:08,058 DEBUG [M:0;c0a89b2656d4:42449 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:29:08,058 DEBUG [M:0;c0a89b2656d4:42449 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:29:08,058 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
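The NodeDeleted event on /hbase/rs/... followed by "RegionServer ephemeral node deleted, processing expiration" is the master's RegionServerTracker reacting to the region server's ephemeral znode disappearing. A plain ZooKeeper-client sketch of the same mechanism, using the quorum address and base znode from the log; the session timeout is illustrative and this is not HBase's tracker code:

```java
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Sketch: each live region server holds an ephemeral znode under /hbase/rs,
// and its deletion fires a NodeChildrenChanged watch, which is what the
// master-side tracker reacts to above.
public class RsZnodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch changed = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49736", 30000, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        changed.countDown();
      }
    });
    try {
      List<String> servers = zk.getChildren("/hbase/rs", true); // arm the watch
      System.out.println("online region servers: " + servers);
      changed.await(); // returns once a server znode is created or deleted
      System.out.println("children of /hbase/rs changed (a server joined or expired)");
    } finally {
      zk.close();
    }
  }
}
```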
2024-11-18T20:29:08,058 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961705165 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961705165,5,FailOnTimeoutGroup] 2024-11-18T20:29:08,058 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961705165 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961705165,5,FailOnTimeoutGroup] 2024-11-18T20:29:08,059 INFO [M:0;c0a89b2656d4:42449 {}] hbase.ChoreService(370): Chore service for: master/c0a89b2656d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:29:08,059 INFO [M:0;c0a89b2656d4:42449 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:29:08,059 DEBUG [M:0;c0a89b2656d4:42449 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:29:08,059 INFO [M:0;c0a89b2656d4:42449 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:29:08,059 INFO [M:0;c0a89b2656d4:42449 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:29:08,059 INFO [M:0;c0a89b2656d4:42449 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:29:08,059 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:29:08,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:29:08,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:08,060 DEBUG [M:0;c0a89b2656d4:42449 {}] zookeeper.ZKUtil(347): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:29:08,060 WARN [M:0;c0a89b2656d4:42449 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:29:08,061 INFO [M:0;c0a89b2656d4:42449 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/.lastflushedseqids 2024-11-18T20:29:08,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741907_1093 (size=130) 2024-11-18T20:29:08,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741907_1093 (size=130) 2024-11-18T20:29:08,069 INFO [M:0;c0a89b2656d4:42449 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:29:08,069 INFO [M:0;c0a89b2656d4:42449 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:29:08,069 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:29:08,069 INFO [M:0;c0a89b2656d4:42449 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:08,069 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:08,069 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:29:08,070 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:08,070 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-18T20:29:08,089 DEBUG [M:0;c0a89b2656d4:42449 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c7935eddb3b24989b3f722b5175d0829 is 82, key is hbase:meta,,1/info:regioninfo/1731961705845/Put/seqid=0 2024-11-18T20:29:08,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741908_1094 (size=5672) 2024-11-18T20:29:08,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741908_1094 (size=5672) 2024-11-18T20:29:08,094 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c7935eddb3b24989b3f722b5175d0829 2024-11-18T20:29:08,118 DEBUG [M:0;c0a89b2656d4:42449 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9b6ca0a080164896a4cc6d9fe1691523 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961706413/Put/seqid=0 2024-11-18T20:29:08,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741909_1095 (size=6255) 2024-11-18T20:29:08,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741909_1095 (size=6255) 2024-11-18T20:29:08,123 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9b6ca0a080164896a4cc6d9fe1691523 2024-11-18T20:29:08,128 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9b6ca0a080164896a4cc6d9fe1691523 2024-11-18T20:29:08,141 DEBUG [M:0;c0a89b2656d4:42449 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b70a3c12318f40f89e63ed6ad68ee007 is 69, key is c0a89b2656d4,40455,1731961704988/rs:state/1731961705226/Put/seqid=0 2024-11-18T20:29:08,146 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741910_1096 (size=5224) 2024-11-18T20:29:08,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741910_1096 (size=5224) 2024-11-18T20:29:08,146 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b70a3c12318f40f89e63ed6ad68ee007 2024-11-18T20:29:08,157 INFO [RS:0;c0a89b2656d4:40455 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:29:08,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:08,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40455-0x1005485ae4d0001, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:08,157 INFO [RS:0;c0a89b2656d4:40455 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0a89b2656d4,40455,1731961704988; zookeeper connection closed. 2024-11-18T20:29:08,157 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1322adf1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1322adf1 2024-11-18T20:29:08,157 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-18T20:29:08,165 DEBUG [M:0;c0a89b2656d4:42449 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90363280cab44a4ca15860a8da50bbe7 is 52, key is load_balancer_on/state:d/1731961705916/Put/seqid=0 2024-11-18T20:29:08,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741911_1097 (size=5056) 2024-11-18T20:29:08,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741911_1097 (size=5056) 2024-11-18T20:29:08,170 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90363280cab44a4ca15860a8da50bbe7 2024-11-18T20:29:08,176 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c7935eddb3b24989b3f722b5175d0829 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c7935eddb3b24989b3f722b5175d0829 2024-11-18T20:29:08,181 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c7935eddb3b24989b3f722b5175d0829, entries=8, sequenceid=60, filesize=5.5 K 2024-11-18T20:29:08,182 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9b6ca0a080164896a4cc6d9fe1691523 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9b6ca0a080164896a4cc6d9fe1691523 2024-11-18T20:29:08,187 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9b6ca0a080164896a4cc6d9fe1691523 2024-11-18T20:29:08,187 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9b6ca0a080164896a4cc6d9fe1691523, entries=6, sequenceid=60, filesize=6.1 K 2024-11-18T20:29:08,188 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b70a3c12318f40f89e63ed6ad68ee007 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b70a3c12318f40f89e63ed6ad68ee007 2024-11-18T20:29:08,193 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b70a3c12318f40f89e63ed6ad68ee007, entries=2, sequenceid=60, filesize=5.1 K 2024-11-18T20:29:08,194 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90363280cab44a4ca15860a8da50bbe7 as hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/90363280cab44a4ca15860a8da50bbe7 2024-11-18T20:29:08,199 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/90363280cab44a4ca15860a8da50bbe7, entries=1, sequenceid=60, filesize=4.9 K 2024-11-18T20:29:08,200 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=60, compaction requested=false 2024-11-18T20:29:08,201 INFO [M:0;c0a89b2656d4:42449 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
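The "Committing .tmp/... as ..." followed by "Added ..., entries=..., filesize=..." lines above reflect the usual write-to-a-temporary-path-then-rename pattern on HDFS: the flushed file is fully written under a `.tmp` directory and only then moved into the live store directory. A minimal sketch of that pattern with the plain Hadoop FileSystem API, assuming made-up paths and payload (this is not HBase's HRegionFileSystem code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative only: write a file under a .tmp directory, then move it into
 *  its final location with rename(), the same pattern the "Committing .tmp/...
 *  as ..." log lines above reflect. Paths and payload are hypothetical. */
public class TmpThenCommit {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();            // picks up fs.defaultFS (e.g. an hdfs:// URI)
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile   = new Path("/demo/store/.tmp/c7935eddb3b24989b3f722b5175d0829");
        Path finalFile = new Path("/demo/store/info/c7935eddb3b24989b3f722b5175d0829");

        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.writeBytes("flushed-cells-go-here");          // stand-in for the real HFile bytes
        }

        fs.mkdirs(finalFile.getParent());                     // make sure the family directory exists
        boolean committed = fs.rename(tmpFile, finalFile);    // the "commit" step
        System.out.println("committed=" + committed + " -> " + finalFile);
    }
}
```

Staging under `.tmp` and renaming last keeps half-written output out of the live store directory until the flush has fully succeeded.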
2024-11-18T20:29:08,202 DEBUG [M:0;c0a89b2656d4:42449 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
Waiting for close lock at 1731961748069
Disabling compacts and flushes for region at 1731961748069
Disabling writes for close at 1731961748069
Obtaining lock to block concurrent updates at 1731961748070 (+1 ms)
Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961748070
Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731961748070
Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731961748071 (+1 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961748071
Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961748088 (+17 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961748088
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961748100 (+12 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961748117 (+17 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961748117
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961748128 (+11 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961748141 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961748141
Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961748151 (+10 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961748164 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961748164
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6d7b09a4: reopening flushed file at 1731961748175 (+11 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a05c243: reopening flushed file at 1731961748181 (+6 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@627ee407: reopening flushed file at 1731961748187 (+6 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@99ec6ae: reopening flushed file at 1731961748193 (+6 ms)
Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=60, compaction requested=false at 1731961748200 (+7 ms)
Writing region close event to WAL at 1731961748201 (+1 ms)
Closed at 1731961748201
2024-11-18T20:29:08,202 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:08,202 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:08,202 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:08,202 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:08,202 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:08,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36623 is added to blk_1073741881_1064 (size=1045)
2024-11-18T20:29:08,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38219 is added to blk_1073741881_1064 (size=1045)
2024-11-18T20:29:08,205 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
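The region close journal above records every flush step with its epoch-millis timestamp and a "(+N ms)" delta from the previous step. A small, hypothetical log-analysis helper for pulling those deltas out of such a journal string and reporting the slowest step; the class name, regex, and sample text are assumptions of this sketch, not HBase code:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Hypothetical helper (not HBase code): extract the
 *  "<step> at <epochMillis> (+<delta> ms)" pieces from a region close
 *  journal string and report the slowest step. */
public class CloseJournalDeltas {

    // One journal step: description, 13-digit epoch-millis timestamp, optional "(+N ms)" delta.
    private static final Pattern STEP =
        Pattern.compile("([A-Z][^()]*?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

    public static void main(String[] args) {
        // Assume the journal portion of the log line has already been isolated.
        String journal = "Waiting for close lock at 1731961748069"
            + "Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961748088 (+17 ms)"
            + "Writing region close event to WAL at 1731961748201 (+1 ms)"
            + "Closed at 1731961748201";

        String slowest = null;
        long slowestMs = -1;
        Matcher m = STEP.matcher(journal);
        while (m.find()) {
            long delta = m.group(3) == null ? 0 : Long.parseLong(m.group(3));
            if (delta > slowestMs) {
                slowestMs = delta;
                slowest = m.group(1).trim();   // the step description
            }
        }
        System.out.println("Slowest step: " + slowest + " (+" + slowestMs + " ms)");
    }
}
```

Run against the full journal above, a helper like this would point at the metadata-append steps (+17 ms) as the longest parts of the 130 ms flush.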
2024-11-18T20:29:08,205 INFO [M:0;c0a89b2656d4:42449 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:29:08,205 INFO [M:0;c0a89b2656d4:42449 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42449 2024-11-18T20:29:08,205 INFO [M:0;c0a89b2656d4:42449 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:29:08,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:08,306 INFO [M:0;c0a89b2656d4:42449 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:29:08,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42449-0x1005485ae4d0000, quorum=127.0.0.1:49736, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:08,309 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d33cd9d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:08,309 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70fc09c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:29:08,309 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:29:08,309 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@597eff8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:29:08,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@91cf5b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,STOPPED} 2024-11-18T20:29:08,311 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:29:08,311 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:29:08,311 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-151693428-172.17.0.2-1731961704338 (Datanode Uuid 0d4885e3-a96e-4f09-acbd-9e9639f9484d) service to localhost/127.0.0.1:39387 2024-11-18T20:29:08,311 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:29:08,311 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24661076 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-151693428-172.17.0.2-1731961704338:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41037,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:39355 , LocalHost:localPort c0a89b2656d4/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T20:29:08,311 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24661076 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-151693428-172.17.0.2-1731961704338:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:38219,null,null], DatanodeInfoWithStorage[127.0.0.1:41037,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-151693428-172.17.0.2-1731961704338 2024-11-18T20:29:08,311 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24661076 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38219,null,null]) java.io.IOException: No block pool offer service for bpid=BP-151693428-172.17.0.2-1731961704338 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:08,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data3/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:08,312 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24661076 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41037,null,null]) java.io.IOException: No block pool offer service for bpid=BP-151693428-172.17.0.2-1731961704338 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:08,312 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@24661076 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:38219,null,null], DatanodeInfoWithStorage[127.0.0.1:41037,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-151693428-172.17.0.2-1731961704338:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:38219,null,null], DatanodeInfoWithStorage[127.0.0.1:41037,null,null]] 2024-11-18T20:29:08,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data4/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:08,312 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:29:08,314 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65fce847{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:08,315 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69cce1cc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:29:08,315 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:29:08,315 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f9c997c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:29:08,315 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4067b708{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,STOPPED} 2024-11-18T20:29:08,316 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:29:08,316 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
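The failed block recovery above is an inter-datanode RPC that was retried under RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) and then interrupted while sleeping between attempts during shutdown, which is why the trace chains InterruptedIOException over "sleep interrupted". A generic sketch of that bounded fixed-sleep retry shape, assuming our own FixedSleepRetry class rather than Hadoop's RetryPolicies implementation:

```java
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.Callable;

/** Sketch only -- not Hadoop's RetryPolicies. Retries an I/O operation up to
 *  maxRetries extra times (assumes maxRetries >= 0) with a fixed sleep between
 *  attempts; an interrupt during that sleep surfaces as InterruptedIOException,
 *  matching the chained exceptions in the stack trace above. */
public final class FixedSleepRetry {

    public static <T> T call(Callable<T> op, int maxRetries, long sleepMillis) throws IOException {
        IOException last = null;
        for (int attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                return op.call();                      // success: hand back the result
            } catch (IOException e) {
                last = e;                              // remember the failure
            } catch (Exception e) {
                throw new IOException(e);              // non-I/O failures are not retried here
            }
            if (attempt == maxRetries) {
                break;                                 // retries exhausted, fall through and rethrow
            }
            try {
                Thread.sleep(sleepMillis);             // fixed back-off, e.g. 1000 ms
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();    // restore the interrupt flag
                InterruptedIOException iioe =
                    new InterruptedIOException("Interrupted while waiting to retry");
                iioe.initCause(ie);                    // mirrors "Caused by: ... sleep interrupted"
                throw iioe;
            }
        }
        throw last;
    }
}
```

A call site would wrap the RPC in a Callable and pass 10 and 1000L to approximate the policy named in the log; during cluster teardown the interrupt wins before the retries are exhausted, as seen here.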
2024-11-18T20:29:08,316 WARN [BP-151693428-172.17.0.2-1731961704338 heartbeating to localhost/127.0.0.1:39387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-151693428-172.17.0.2-1731961704338 (Datanode Uuid a176c638-4aff-4918-9728-beb54f865e3c) service to localhost/127.0.0.1:39387 2024-11-18T20:29:08,316 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:29:08,317 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data7/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:08,317 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:29:08,317 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/cluster_dfc198ff-885a-4961-171c-fedc659a1916/data/data8/current/BP-151693428-172.17.0.2-1731961704338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:08,323 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d91fc86{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:29:08,324 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@278b2194{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:29:08,324 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:29:08,324 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34db4bed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:29:08,324 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e9f62e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir/,STOPPED} 2024-11-18T20:29:08,333 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:29:08,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:29:08,368 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=153 (was 78) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:33599 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:39387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f0a50bf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f0a50bf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33599 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:39387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39387 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39387 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially 
hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39387 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39387 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:39387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=50 (was 68), ProcessCount=11 (was 11), AvailableMemoryMB=2676 (was 3109) 2024-11-18T20:29:08,374 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=153, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=50, ProcessCount=11, AvailableMemoryMB=2676 2024-11-18T20:29:08,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:29:08,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.log.dir so I do NOT create it in target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059 2024-11-18T20:29:08,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6e6bbdf7-cf1f-badc-b8b2-a1e4db49f404/hadoop.tmp.dir so I do NOT create it in target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8, deleteOnExit=true 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/test.cache.data in system properties and HBase conf 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:29:08,375 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:29:08,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:29:08,376 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:29:08,382 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:29:08,387 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:29:08,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,396 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:08,461 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:08,468 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:29:08,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:29:08,470 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:29:08,470 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:29:08,470 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:08,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47f3561f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:29:08,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@521c98fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:29:08,563 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53f1ce1d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/java.io.tmpdir/jetty-localhost-40415-hadoop-hdfs-3_4_1-tests_jar-_-any-18301977704290531434/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:29:08,564 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2024a4cb{HTTP/1.1, (http/1.1)}{localhost:40415} 2024-11-18T20:29:08,564 INFO [Time-limited test {}] server.Server(415): Started @147259ms 2024-11-18T20:29:08,575 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
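The DatanodeManager warning just above compares the configured stale-datanode interval (30000 ms) with a heartbeat expire interval of 20000 ms. A minimal arithmetic sketch of that comparison follows, assuming the usual HDFS derivation of the expire interval from the recheck interval and the heartbeat interval; the 5000 ms / 1 s values are hypothetical mini-cluster settings chosen only because they reproduce the 20000 ms figure seen in this run, not values read from the test configuration.

    // Sketch of the stale-datanode check behind the DatanodeManager warning above.
    // Assumed inputs (hypothetical): recheck interval 5000 ms, heartbeat interval 1 s.
    public class StaleIntervalCheck {
      public static void main(String[] args) {
        long recheckIntervalMs = 5_000L;     // dfs.namenode.heartbeat.recheck-interval (assumed)
        long heartbeatIntervalSec = 1L;      // dfs.heartbeat.interval (assumed)
        long staleIntervalMs = 30_000L;      // stale-datanode interval reported in the log

        // Common derivation: a datanode is expired after two recheck periods
        // plus ten missed heartbeats.
        long heartbeatExpireIntervalMs =
            2 * recheckIntervalMs + 10 * 1_000L * heartbeatIntervalSec;   // = 20000 ms here

        if (staleIntervalMs > heartbeatExpireIntervalMs) {
          System.out.printf(
              "The given interval for marking stale datanode = %d, "
                  + "which is larger than heartbeat expire interval %d.%n",
              staleIntervalMs, heartbeatExpireIntervalMs);
        }
      }
    }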
2024-11-18T20:29:08,620 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:08,623 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:29:08,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:29:08,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:29:08,624 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:29:08,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6cccfd06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:29:08,625 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e543aab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:29:08,718 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2586409d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/java.io.tmpdir/jetty-localhost-34821-hadoop-hdfs-3_4_1-tests_jar-_-any-10358517329601469287/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:08,718 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7447a5c6{HTTP/1.1, (http/1.1)}{localhost:34821} 2024-11-18T20:29:08,719 INFO [Time-limited test {}] server.Server(415): Started @147414ms 2024-11-18T20:29:08,720 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:29:08,746 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:08,751 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:29:08,751 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:29:08,751 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:29:08,752 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:29:08,752 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d8e0e5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:29:08,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f4d9d98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:29:08,778 WARN [Thread-1184 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data1/current/BP-1523093664-172.17.0.2-1731961748415/current, will proceed with Du for space computation calculation, 2024-11-18T20:29:08,778 WARN [Thread-1185 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data2/current/BP-1523093664-172.17.0.2-1731961748415/current, will proceed with Du for space computation calculation, 2024-11-18T20:29:08,793 WARN [Thread-1163 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:29:08,797 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b7b294fe9fe6270 with lease ID 0xb36dc9713e8dc61f: Processing first storage report for DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e from datanode DatanodeRegistration(127.0.0.1:35643, datanodeUuid=5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc, infoPort=38713, infoSecurePort=0, ipcPort=37455, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415) 2024-11-18T20:29:08,797 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b7b294fe9fe6270 with lease ID 0xb36dc9713e8dc61f: from storage DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e node DatanodeRegistration(127.0.0.1:35643, datanodeUuid=5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc, infoPort=38713, infoSecurePort=0, ipcPort=37455, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:08,797 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b7b294fe9fe6270 with lease ID 0xb36dc9713e8dc61f: Processing first storage report for DS-9c6d7849-eb9b-4156-923e-96bcfcee6c97 from datanode DatanodeRegistration(127.0.0.1:35643, datanodeUuid=5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc, infoPort=38713, infoSecurePort=0, ipcPort=37455, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415) 2024-11-18T20:29:08,797 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b7b294fe9fe6270 with lease ID 0xb36dc9713e8dc61f: from storage DS-9c6d7849-eb9b-4156-923e-96bcfcee6c97 node DatanodeRegistration(127.0.0.1:35643, datanodeUuid=5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc, infoPort=38713, infoSecurePort=0, ipcPort=37455, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:08,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:08,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:08,861 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c412a06{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/java.io.tmpdir/jetty-localhost-38157-hadoop-hdfs-3_4_1-tests_jar-_-any-13496592825182913352/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:08,861 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f6e2fed{HTTP/1.1, (http/1.1)}{localhost:38157} 2024-11-18T20:29:08,862 INFO [Time-limited test {}] server.Server(415): Started @147557ms 2024-11-18T20:29:08,863 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:29:08,925 WARN [Thread-1210 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data3/current/BP-1523093664-172.17.0.2-1731961748415/current, will proceed with Du for space computation calculation, 2024-11-18T20:29:08,925 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data4/current/BP-1523093664-172.17.0.2-1731961748415/current, will proceed with Du for space computation calculation, 2024-11-18T20:29:08,941 WARN [Thread-1199 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:29:08,943 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb8b2d28b8ae9e01a with lease ID 0xb36dc9713e8dc620: Processing first storage report for DS-0d785930-d55d-4048-9df4-3506ef4dab2e from datanode DatanodeRegistration(127.0.0.1:43537, datanodeUuid=08e54dcc-24a4-4a85-9d3d-c8bf0af91401, infoPort=40511, infoSecurePort=0, ipcPort=40969, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415) 2024-11-18T20:29:08,943 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb8b2d28b8ae9e01a with lease ID 0xb36dc9713e8dc620: from storage DS-0d785930-d55d-4048-9df4-3506ef4dab2e node DatanodeRegistration(127.0.0.1:43537, datanodeUuid=08e54dcc-24a4-4a85-9d3d-c8bf0af91401, infoPort=40511, infoSecurePort=0, ipcPort=40969, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:08,943 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb8b2d28b8ae9e01a with lease ID 0xb36dc9713e8dc620: Processing first storage report for DS-e42198b1-8041-4e6f-8b38-0784ec7c740f from datanode DatanodeRegistration(127.0.0.1:43537, datanodeUuid=08e54dcc-24a4-4a85-9d3d-c8bf0af91401, infoPort=40511, infoSecurePort=0, ipcPort=40969, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415) 2024-11-18T20:29:08,943 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb8b2d28b8ae9e01a with lease ID 0xb36dc9713e8dc620: from storage DS-e42198b1-8041-4e6f-8b38-0784ec7c740f node DatanodeRegistration(127.0.0.1:43537, datanodeUuid=08e54dcc-24a4-4a85-9d3d-c8bf0af91401, infoPort=40511, infoSecurePort=0, ipcPort=40969, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:08,993 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059 2024-11-18T20:29:08,998 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/zookeeper_0, clientPort=51396, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:29:08,999 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51396 2024-11-18T20:29:09,000 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:09,002 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:09,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43537 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:29:09,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35643 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:29:09,014 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723 with version=8 2024-11-18T20:29:09,014 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/hbase-staging 2024-11-18T20:29:09,016 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:29:09,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:09,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:09,016 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:29:09,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:09,016 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:29:09,016 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:29:09,016 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:29:09,017 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42061 2024-11-18T20:29:09,018 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42061 connecting to ZooKeeper ensemble=127.0.0.1:51396 2024-11-18T20:29:09,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:420610x0, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:29:09,023 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42061-0x10054865a730000 connected 2024-11-18T20:29:09,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:09,041 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:09,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:29:09,043 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723, hbase.cluster.distributed=false 2024-11-18T20:29:09,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:29:09,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42061 2024-11-18T20:29:09,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42061 2024-11-18T20:29:09,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42061 2024-11-18T20:29:09,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42061 2024-11-18T20:29:09,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42061 2024-11-18T20:29:09,068 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:29:09,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:09,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:09,068 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:29:09,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:09,068 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:29:09,068 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:29:09,069 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:29:09,069 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38945 2024-11-18T20:29:09,071 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38945 connecting to ZooKeeper ensemble=127.0.0.1:51396 2024-11-18T20:29:09,071 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:09,073 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:09,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389450x0, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:29:09,076 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:389450x0, quorum=127.0.0.1:51396, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:29:09,077 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38945-0x10054865a730001 connected 2024-11-18T20:29:09,077 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:29:09,078 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:29:09,079 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:29:09,080 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:29:09,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38945 2024-11-18T20:29:09,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38945 2024-11-18T20:29:09,081 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38945 2024-11-18T20:29:09,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38945 2024-11-18T20:29:09,085 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38945 2024-11-18T20:29:09,095 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c0a89b2656d4:42061 2024-11-18T20:29:09,096 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:09,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:29:09,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:29:09,097 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:09,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:29:09,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,098 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:29:09,099 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c0a89b2656d4,42061,1731961749016 from backup master directory 2024-11-18T20:29:09,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:09,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:29:09,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:29:09,099 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
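The ZKWatcher/ZKUtil entries above repeatedly "Set watcher on znode that does not yet exist" (for example /hbase/running and /hbase/master) and then receive NodeCreated and NodeChildrenChanged events once the master registers. A small standalone sketch of that pattern with the plain ZooKeeper client is shown below; the ensemble address is the client port from this log (only reachable while that mini cluster is up), and the session timeout and the path /demo/running are illustrative assumptions.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Watch a znode that may not exist yet: exists() registers the watch either way,
    // so a later create() on the path fires a NodeCreated event.
    public class WatchMissingZnode {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51396", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // Returns null when the node is absent, but the watch is still set.
        zk.exists("/demo/running", event -> {
          if (event.getType() == Watcher.Event.EventType.NodeCreated) {
            System.out.println("znode created: " + event.getPath());
          }
        });

        Thread.sleep(60_000);   // keep the session alive long enough to observe the event
        zk.close();
      }
    }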
2024-11-18T20:29:09,100 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:09,106 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/hbase.id] with ID: 701e8201-2d17-4325-95bb-bcddfb83cbda 2024-11-18T20:29:09,106 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/.tmp/hbase.id 2024-11-18T20:29:09,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43537 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:29:09,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35643 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:29:09,113 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/.tmp/hbase.id]:[hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/hbase.id] 2024-11-18T20:29:09,124 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:09,124 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:29:09,126 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
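The FSUtils entries above create the cluster ID by first writing .tmp/hbase.id and then moving it to its final name, so a reader never observes a half-written file. A minimal sketch of that write-to-temporary-then-rename pattern with the Hadoop FileSystem API follows; the /demo paths are placeholders, and the ID string is simply copied from the log for illustration.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Write a small marker file so it appears atomically to readers:
    // write to a temporary path first, then rename it into place.
    public class WriteIdFile {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();   // fs.defaultFS decides which FS this hits
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/demo/.tmp/cluster.id");   // hypothetical temporary location
        Path target = new Path("/demo/cluster.id");     // hypothetical final location

        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("701e8201-2d17-4325-95bb-bcddfb83cbda".getBytes(StandardCharsets.UTF_8));
        }
        // rename() reports most failures by returning false rather than throwing, so check it.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
        }
        System.out.println("cluster id file at " + target);
      }
    }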
2024-11-18T20:29:09,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35643 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:29:09,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43537 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:29:09,138 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:29:09,139 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:29:09,140 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:29:09,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43537 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:29:09,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35643 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:29:09,148 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store 2024-11-18T20:29:09,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35643 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:29:09,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43537 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:29:09,155 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:09,155 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:29:09,155 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:09,155 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:09,155 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:29:09,155 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:09,155 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
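The MasterRegion/HRegion entries above dump the full schema of the local 'master:store' table, including per-family settings such as VERSIONS, IN_MEMORY, DATA_BLOCK_ENCODING, BLOOMFILTER and BLOCKSIZE. A rough sketch of how a descriptor with the 'info' family values shown in that dump could be assembled with the public HBase client builders follows; this is an illustration of the builder API, not the code path the master itself runs.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Build a descriptor whose 'info' family matches the attributes printed above:
    // VERSIONS=3, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL, BLOCKSIZE=8192.
    public class MasterStoreSchemaSketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();

        // toString() renders the schema in a form comparable to the dump in the log above.
        System.out.println(td);
      }
    }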
2024-11-18T20:29:09,155 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961749155Disabling compacts and flushes for region at 1731961749155Disabling writes for close at 1731961749155Writing region close event to WAL at 1731961749155Closed at 1731961749155 2024-11-18T20:29:09,156 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/.initializing 2024-11-18T20:29:09,156 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:09,158 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C42061%2C1731961749016, suffix=, logDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016, archiveDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/oldWALs, maxLogs=10 2024-11-18T20:29:09,159 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C42061%2C1731961749016.1731961749159 2024-11-18T20:29:09,165 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 2024-11-18T20:29:09,168 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38713:38713),(127.0.0.1/127.0.0.1:40511:40511)] 2024-11-18T20:29:09,169 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:29:09,169 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:09,169 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,169 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:29:09,172 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:09,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:29:09,174 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:29:09,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:29:09,176 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:29:09,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:29:09,177 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:29:09,178 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,178 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,179 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,180 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,180 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,180 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:29:09,181 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:09,183 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:29:09,184 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775675, jitterRate=-0.013679295778274536}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:29:09,185 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961749169Initializing all the Stores at 1731961749170 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961749170Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961749170Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961749171 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961749171Cleaning up temporary data from old regions at 1731961749180 (+9 ms)Region opened successfully at 1731961749184 (+4 ms) 2024-11-18T20:29:09,185 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:29:09,188 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@624dea4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:29:09,189 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:29:09,189 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:29:09,189 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:29:09,189 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:29:09,190 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:29:09,190 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:29:09,190 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:29:09,192 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:29:09,193 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:29:09,194 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:29:09,194 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:29:09,194 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:29:09,195 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:29:09,195 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:29:09,198 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:29:09,199 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:29:09,200 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:29:09,201 DEBUG 
[master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:29:09,203 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:29:09,204 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:29:09,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:29:09,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:29:09,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,205 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c0a89b2656d4,42061,1731961749016, sessionid=0x10054865a730000, setting cluster-up flag (Was=false) 2024-11-18T20:29:09,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,209 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:29:09,210 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:09,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,215 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:29:09,216 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:09,217 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:29:09,219 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:29:09,219 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:29:09,219 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:29:09,219 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c0a89b2656d4,42061,1731961749016 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:29:09,221 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:29:09,221 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:29:09,221 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:29:09,221 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:29:09,221 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c0a89b2656d4:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:29:09,221 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,221 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:29:09,221 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:29:09,227 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961779227 2024-11-18T20:29:09,227 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:29:09,227 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:29:09,227 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:29:09,227 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:29:09,227 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:29:09,227 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:29:09,227 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:29:09,227 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:29:09,228 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
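
Several of the entries above announce ScheduledChore instances (LogsCleaner, and shortly afterwards HFileCleaner and others) being registered with a ChoreService at fixed periods. A minimal, self-contained sketch of that pattern, assuming the ScheduledChore(name, stopper, period) and ChoreService(prefix) constructors are usable from test-style code:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) throws InterruptedException {
            // A trivial Stoppable so the chore can be cancelled cleanly.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            // A no-op chore scheduled every 600000 ms, mirroring the LogsCleaner period above.
            ScheduledChore chore = new ScheduledChore("LogsCleanerLikeChore", stopper, 600000) {
                @Override protected void chore() {
                    // A real cleaner chore would scan oldWALs / archive directories here.
                }
            };
            ChoreService service = new ChoreService("sketch");
            service.scheduleChore(chore);
            Thread.sleep(1000);
            stopper.stop("done");
            service.shutdown();
        }
    }
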
2024-11-18T20:29:09,228 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:29:09,228 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:29:09,228 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:29:09,228 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,228 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:29:09,228 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:29:09,228 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961749228,5,FailOnTimeoutGroup] 2024-11-18T20:29:09,228 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:29:09,229 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961749229,5,FailOnTimeoutGroup] 2024-11-18T20:29:09,229 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,229 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:29:09,229 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,229 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35643 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:29:09,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43537 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:29:09,241 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:29:09,241 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723 2024-11-18T20:29:09,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35643 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:29:09,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43537 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:29:09,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:09,251 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:29:09,252 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:29:09,252 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:09,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:29:09,254 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:29:09,254 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:09,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:29:09,256 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:29:09,256 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:09,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:29:09,258 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:29:09,258 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:09,259 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:29:09,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740 2024-11-18T20:29:09,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740 2024-11-18T20:29:09,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:29:09,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:29:09,262 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:29:09,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:29:09,264 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:29:09,265 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875675, jitterRate=0.1134795993566513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:29:09,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961749250Initializing all the Stores at 1731961749250Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961749250Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961749251 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961749251Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961749251Cleaning up temporary data from old regions at 1731961749261 (+10 ms)Region opened successfully at 1731961749265 (+4 ms) 2024-11-18T20:29:09,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:29:09,265 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:29:09,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:29:09,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:29:09,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:29:09,266 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:29:09,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961749265Disabling compacts and flushes for region at 
1731961749265Disabling writes for close at 1731961749265Writing region close event to WAL at 1731961749266 (+1 ms)Closed at 1731961749266 2024-11-18T20:29:09,267 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:29:09,267 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:29:09,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:29:09,268 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:29:09,269 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:29:09,287 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(746): ClusterId : 701e8201-2d17-4325-95bb-bcddfb83cbda 2024-11-18T20:29:09,287 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:29:09,290 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:29:09,290 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:29:09,292 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:29:09,293 DEBUG [RS:0;c0a89b2656d4:38945 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54fe8438, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:29:09,309 DEBUG [RS:0;c0a89b2656d4:38945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c0a89b2656d4:38945 2024-11-18T20:29:09,310 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:29:09,310 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:29:09,310 DEBUG [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(832): About to register with Master. 
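
The repeated CompactionConfiguration dumps above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB) appear to reflect the usual hbase.hstore.compaction.* settings. The sketch below sets those keys explicitly; treat the exact key-to-field mapping as an assumption rather than something stated in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed mapping from the CompactionConfiguration dump to configuration keys:
            conf.setInt("hbase.hstore.compaction.min", 3);                    // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);                   // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);             // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);     // off-peak ratio
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
            System.out.println("compaction ratio = " + conf.get("hbase.hstore.compaction.ratio"));
        }
    }
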
2024-11-18T20:29:09,310 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0a89b2656d4,42061,1731961749016 with port=38945, startcode=1731961749068 2024-11-18T20:29:09,311 DEBUG [RS:0;c0a89b2656d4:38945 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:29:09,312 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43705, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:29:09,313 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42061 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:09,313 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42061 {}] master.ServerManager(517): Registering regionserver=c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:09,315 DEBUG [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723 2024-11-18T20:29:09,315 DEBUG [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40967 2024-11-18T20:29:09,315 DEBUG [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:29:09,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:29:09,317 DEBUG [RS:0;c0a89b2656d4:38945 {}] zookeeper.ZKUtil(111): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:09,317 WARN [RS:0;c0a89b2656d4:38945 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:29:09,317 INFO [RS:0;c0a89b2656d4:38945 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:29:09,317 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0a89b2656d4,38945,1731961749068] 2024-11-18T20:29:09,317 DEBUG [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:09,321 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:29:09,323 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:29:09,323 INFO [RS:0;c0a89b2656d4:38945 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:29:09,323 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
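
The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the low mark is 95% of the limit. Assuming these values derive from hbase.regionserver.global.memstore.size (a fraction of the region server heap) and its .lower.limit companion (a fraction of that limit), the relationship can be sketched as follows; the fractions shown are the commonly documented defaults, not values read from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);             // fraction of heap
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f); // fraction of the limit
            long heapBytes = Runtime.getRuntime().maxMemory();
            long limit = (long) (heapBytes * conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
            long lowMark = (long) (limit * conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f));
            System.out.println("globalMemStoreLimit=" + limit + " lowMark=" + lowMark);
        }
    }
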
2024-11-18T20:29:09,323 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:29:09,324 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:29:09,324 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,324 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,324 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,324 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,324 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,324 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,324 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:29:09,325 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,325 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,325 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,325 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,325 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,325 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:09,325 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:29:09,325 DEBUG [RS:0;c0a89b2656d4:38945 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:29:09,325 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
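
By this point the master is active, the region server has registered, and hbase:meta is about to be assigned. A small client-side sketch that would report the same active master and live region server count via the Admin API; the quorum settings below are hypothetical, mirroring the 127.0.0.1:51396 quorum that appears in the log rather than anything a real deployment should hard-code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical quorum, matching the test's 127.0.0.1:51396 ZooKeeper ensemble.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "51396");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                System.out.println("active master: " + metrics.getMasterName());
                System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
            }
        }
    }
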
2024-11-18T20:29:09,325 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,325 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,325 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,325 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,325 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,38945,1731961749068-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:29:09,340 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:29:09,340 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,38945,1731961749068-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,340 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,340 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.Replication(171): c0a89b2656d4,38945,1731961749068 started 2024-11-18T20:29:09,353 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,353 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(1482): Serving as c0a89b2656d4,38945,1731961749068, RpcServer on c0a89b2656d4/172.17.0.2:38945, sessionid=0x10054865a730001 2024-11-18T20:29:09,353 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:29:09,353 DEBUG [RS:0;c0a89b2656d4:38945 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:09,353 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,38945,1731961749068' 2024-11-18T20:29:09,353 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:29:09,354 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:29:09,354 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:29:09,354 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:29:09,354 DEBUG [RS:0;c0a89b2656d4:38945 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:09,354 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,38945,1731961749068' 2024-11-18T20:29:09,354 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:29:09,355 DEBUG 
[RS:0;c0a89b2656d4:38945 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:29:09,355 DEBUG [RS:0;c0a89b2656d4:38945 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:29:09,355 INFO [RS:0;c0a89b2656d4:38945 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:29:09,355 INFO [RS:0;c0a89b2656d4:38945 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:29:09,420 WARN [c0a89b2656d4:42061 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:29:09,459 INFO [RS:0;c0a89b2656d4:38945 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C38945%2C1731961749068, suffix=, logDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068, archiveDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/oldWALs, maxLogs=32 2024-11-18T20:29:09,462 INFO [RS:0;c0a89b2656d4:38945 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C38945%2C1731961749068.1731961749461 2024-11-18T20:29:09,471 INFO [RS:0;c0a89b2656d4:38945 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 2024-11-18T20:29:09,472 DEBUG [RS:0;c0a89b2656d4:38945 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40511:40511),(127.0.0.1/127.0.0.1:38713:38713)] 2024-11-18T20:29:09,670 DEBUG [c0a89b2656d4:42061 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:29:09,671 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:09,675 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,38945,1731961749068, state=OPENING 2024-11-18T20:29:09,678 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:29:09,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:09,682 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:29:09,682 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:29:09,682 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:29:09,682 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,38945,1731961749068}] 2024-11-18T20:29:09,837 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:29:09,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:09,839 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58677, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:29:09,845 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:29:09,845 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:29:09,848 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C38945%2C1731961749068.meta, suffix=.meta, logDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068, archiveDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/oldWALs, maxLogs=32 2024-11-18T20:29:09,849 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta 2024-11-18T20:29:09,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:09,856 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta 2024-11-18T20:29:09,857 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40511:40511),(127.0.0.1/127.0.0.1:38713:38713)] 2024-11-18T20:29:09,858 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:29:09,859 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:29:09,859 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:29:09,859 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-18T20:29:09,859 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:29:09,859 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:09,859 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:29:09,859 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:29:09,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:29:09,862 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:29:09,862 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:09,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:29:09,863 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:29:09,863 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:09,864 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:29:09,865 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:29:09,865 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:09,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:29:09,866 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:29:09,866 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:09,867 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:09,867 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:29:09,868 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740 2024-11-18T20:29:09,869 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740 2024-11-18T20:29:09,871 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:29:09,871 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:29:09,871 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
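The FlushLargeStoresPolicy entry above shows the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table: the per-family flush threshold becomes the region's memstore flush size divided by its number of column families. A minimal sketch of that arithmetic, assuming the logged "16.0 M" comes from a 64 MB region flush size spread across hbase:meta's four families (info, ns, rep_barrier, table); only the division itself is taken from the log message, the 64 MB figure is an inference:

    // Sketch of the fallback described in the FlushLargeStoresPolicy message above.
    // The 64 MB region flush size is an assumption inferred from the logged result.
    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long regionMemstoreFlushSize = 64L * 1024 * 1024; // assumed for this region
        int columnFamilies = 4;                           // info, ns, rep_barrier, table
        long lowerBound = regionMemstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);                   // 16777216 bytes, the "16.0 M" above
      }
    }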
2024-11-18T20:29:09,873 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:29:09,873 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764549, jitterRate=-0.0278262197971344}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:29:09,874 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:29:09,874 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961749860Writing region info on filesystem at 1731961749860Initializing all the Stores at 1731961749860Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961749860Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961749861 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961749861Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961749861Cleaning up temporary data from old regions at 1731961749871 (+10 ms)Running coprocessor post-open hooks at 1731961749874 (+3 ms)Region opened successfully at 1731961749874 2024-11-18T20:29:09,875 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961749836 2024-11-18T20:29:09,878 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:29:09,878 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:29:09,879 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:09,880 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,38945,1731961749068, state=OPEN 2024-11-18T20:29:09,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:29:09,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:29:09,882 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:09,882 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:29:09,882 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:29:09,885 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:29:09,885 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,38945,1731961749068 in 200 msec 2024-11-18T20:29:09,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:29:09,888 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 618 msec 2024-11-18T20:29:09,889 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:29:09,889 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:29:09,890 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:29:09,890 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,38945,1731961749068, seqNum=-1] 2024-11-18T20:29:09,890 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:29:09,892 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58045, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:29:09,898 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 679 msec 2024-11-18T20:29:09,898 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961749898, completionTime=-1 2024-11-18T20:29:09,898 INFO 
[master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:29:09,898 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:29:09,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:29:09,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961809900 2024-11-18T20:29:09,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961869900 2024-11-18T20:29:09,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-18T20:29:09,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42061,1731961749016-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42061,1731961749016-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42061,1731961749016-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c0a89b2656d4:42061, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,900 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,901 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,902 DEBUG [master/c0a89b2656d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:29:09,904 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.804sec 2024-11-18T20:29:09,904 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:29:09,904 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:29:09,904 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:29:09,904 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
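The "Opened 1588230740" entry at 20:29:09,873 reports the split policy as ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764549, jitterRate=-0.0278262197971344}. Those numbers are consistent with a configured hbase.hregion.max.filesize of 786432 bytes (the value the TableDescriptorChecker warns about later in this log) adjusted by the logged jitter. A small sketch of that arithmetic; the formula approximates how the desired size relates to the jitter rate and is not a copy of HBase's code:

    // Reproduces the desiredMaxFileSize reported in the 20:29:09,873 entry.
    public class SplitJitterSketch {
      public static void main(String[] args) {
        long configuredMaxFileSize = 786_432L;           // hbase.hregion.max.filesize in this test
        double jitterRate = -0.0278262197971344;         // jitterRate from the log entry
        long desired = configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);
        System.out.println(desired);                     // 764549, matching desiredMaxFileSize
      }
    }

The same arithmetic with the positive jitterRate logged when the test table's region opens later (0.033257126808166504) yields 812586, matching that entry as well.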
2024-11-18T20:29:09,904 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:29:09,904 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42061,1731961749016-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:29:09,904 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42061,1731961749016-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:29:09,907 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:29:09,907 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:29:09,907 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,42061,1731961749016-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:09,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c47a530, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:29:09,987 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c0a89b2656d4,42061,-1 for getting cluster id 2024-11-18T20:29:09,988 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:29:09,989 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '701e8201-2d17-4325-95bb-bcddfb83cbda' 2024-11-18T20:29:09,989 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:29:09,990 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "701e8201-2d17-4325-95bb-bcddfb83cbda" 2024-11-18T20:29:09,990 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dba7c51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:29:09,990 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c0a89b2656d4,42061,-1] 2024-11-18T20:29:09,990 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:29:09,990 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:09,992 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59722, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:29:09,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a56275c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:29:09,993 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:29:09,994 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,38945,1731961749068, seqNum=-1] 2024-11-18T20:29:09,994 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:29:09,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59960, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:29:09,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:09,998 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:10,001 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:29:10,001 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-18T20:29:10,001 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-18T20:29:10,001 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:29:10,002 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:10,003 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6a876b0e 2024-11-18T20:29:10,003 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:29:10,004 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59736, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:29:10,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:29:10,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
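The two TableDescriptorChecker warnings above fire because the test runs with a deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) so that log rolls, flushes and splits happen quickly. A rough client-side sketch of a descriptor that would trigger the same warnings; the table and family names come from the log, but this is not the test's actual code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Builds a descriptor with limits small enough to trip the warnings above.
    // Assumes an already-connected Admin instance.
    public class SmallTableSketch {
      static void createSmallTable(Admin admin) throws java.io.IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setMaxFileSize(786432)       // per-table MAX_FILESIZE, well under the sane minimum
            .setMemStoreFlushSize(8192)   // per-table MEMSTORE_FLUSHSIZE, flush after ~8 KB
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .build();
        admin.createTable(desc);          // the master then runs the CreateTableProcedure below
      }
    }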
2024-11-18T20:29:10,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:29:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T20:29:10,008 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:29:10,008 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:10,008 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-18T20:29:10,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:29:10,010 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:29:10,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43537 is added to blk_1073741835_1011 (size=395) 2024-11-18T20:29:10,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35643 is added to blk_1073741835_1011 (size=395) 2024-11-18T20:29:10,018 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a1f8ae07b8265e892cf120dd91f8c305, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723 2024-11-18T20:29:10,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35643 is added to blk_1073741836_1012 (size=78) 2024-11-18T20:29:10,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43537 is added to blk_1073741836_1012 (size=78) 2024-11-18T20:29:10,026 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:10,026 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing a1f8ae07b8265e892cf120dd91f8c305, disabling compactions & flushes 2024-11-18T20:29:10,026 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:10,026 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:10,026 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. after waiting 0 ms 2024-11-18T20:29:10,026 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:10,026 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:10,026 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for a1f8ae07b8265e892cf120dd91f8c305: Waiting for close lock at 1731961750026Disabling compacts and flushes for region at 1731961750026Disabling writes for close at 1731961750026Writing region close event to WAL at 1731961750026Closed at 1731961750026 2024-11-18T20:29:10,028 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:29:10,028 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731961750028"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961750028"}]},"ts":"1731961750028"} 2024-11-18T20:29:10,030 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
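The MetaTableAccessor Put above records the new region in hbase:meta: the row key is the full region name, and both columns live in the info family, regioninfo (the serialized RegionInfo, vlen=77) and state (vlen=6). A hedged sketch of a Put with the same shape; the values are placeholders, since HBase writes the protobuf-serialized RegionInfo and the region-state name through MetaTableAccessor rather than raw strings:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    // Mirrors the structure of the meta row logged above. Values are illustrative only.
    public class MetaRowSketch {
      static Put buildMetaRow() {
        byte[] row = Bytes.toBytes(
            "TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305.");
        Put put = new Put(row);
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"),
            new byte[77]);                // placeholder for the serialized RegionInfo (vlen=77)
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"),
            Bytes.toBytes("CLOSED"));     // assumption: a 6-byte state name, matching vlen=6
        return put;
      }
    }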
2024-11-18T20:29:10,032 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:29:10,032 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961750032"}]},"ts":"1731961750032"} 2024-11-18T20:29:10,034 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-18T20:29:10,035 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a1f8ae07b8265e892cf120dd91f8c305, ASSIGN}] 2024-11-18T20:29:10,036 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a1f8ae07b8265e892cf120dd91f8c305, ASSIGN 2024-11-18T20:29:10,037 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a1f8ae07b8265e892cf120dd91f8c305, ASSIGN; state=OFFLINE, location=c0a89b2656d4,38945,1731961749068; forceNewPlan=false, retain=false 2024-11-18T20:29:10,188 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a1f8ae07b8265e892cf120dd91f8c305, regionState=OPENING, regionLocation=c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:10,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a1f8ae07b8265e892cf120dd91f8c305, ASSIGN because future has completed 2024-11-18T20:29:10,196 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a1f8ae07b8265e892cf120dd91f8c305, server=c0a89b2656d4,38945,1731961749068}] 2024-11-18T20:29:10,362 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 
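Once the OpenRegionProcedure initialized above completes (its "Finished" entries follow below), a client can resolve the new region's location through hbase:meta, just as the earlier "Start fetching meta region location from registry" debug lines do for meta itself. A minimal lookup sketch using the standard client API, assuming a Configuration that points at this mini-cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Looks up which region server hosts the single region of the test table.
    // The lookup reads hbase:meta, so the assignment above must have completed.
    public class RegionLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml for this cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW, true);
          System.out.println(loc.getServerName());         // e.g. c0a89b2656d4,38945,1731961749068
        }
      }
    }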
2024-11-18T20:29:10,362 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a1f8ae07b8265e892cf120dd91f8c305, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:29:10,363 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,363 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:10,363 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,363 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,366 INFO [StoreOpener-a1f8ae07b8265e892cf120dd91f8c305-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,367 INFO [StoreOpener-a1f8ae07b8265e892cf120dd91f8c305-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1f8ae07b8265e892cf120dd91f8c305 columnFamilyName info 2024-11-18T20:29:10,367 DEBUG [StoreOpener-a1f8ae07b8265e892cf120dd91f8c305-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:10,368 INFO [StoreOpener-a1f8ae07b8265e892cf120dd91f8c305-1 {}] regionserver.HStore(327): Store=a1f8ae07b8265e892cf120dd91f8c305/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:29:10,368 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,369 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/default/TestLogRolling-testLogRollOnPipelineRestart/a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,369 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/default/TestLogRolling-testLogRollOnPipelineRestart/a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,370 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,370 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,372 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,375 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/default/TestLogRolling-testLogRollOnPipelineRestart/a1f8ae07b8265e892cf120dd91f8c305/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:29:10,375 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a1f8ae07b8265e892cf120dd91f8c305; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812586, jitterRate=0.033257126808166504}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:29:10,375 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:10,376 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a1f8ae07b8265e892cf120dd91f8c305: Running coprocessor pre-open hook at 1731961750364Writing region info on filesystem at 1731961750364Initializing all the Stores at 1731961750365 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961750365Cleaning up temporary data from old regions at 1731961750370 (+5 ms)Running coprocessor post-open hooks at 1731961750375 (+5 ms)Region opened successfully at 1731961750376 (+1 ms) 2024-11-18T20:29:10,377 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305., pid=6, masterSystemTime=1731961750352 2024-11-18T20:29:10,380 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:10,380 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:10,381 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a1f8ae07b8265e892cf120dd91f8c305, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:10,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a1f8ae07b8265e892cf120dd91f8c305, server=c0a89b2656d4,38945,1731961749068 because future has completed 2024-11-18T20:29:10,387 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T20:29:10,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a1f8ae07b8265e892cf120dd91f8c305, server=c0a89b2656d4,38945,1731961749068 in 189 msec 2024-11-18T20:29:10,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T20:29:10,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=a1f8ae07b8265e892cf120dd91f8c305, ASSIGN in 353 msec 2024-11-18T20:29:10,392 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T20:29:10,392 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961750392"}]},"ts":"1731961750392"} 2024-11-18T20:29:10,394 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-18T20:29:10,395 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T20:29:10,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 390 msec 2024-11-18T20:29:10,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:10,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:11,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:11,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:12,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:12,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:13,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:13,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:14,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:14,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:15,366 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:29:15,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,393 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,393 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,393 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,398 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:15,405 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T20:29:15,406 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-18T20:29:15,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:15,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:16,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:29:16,106 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T20:29:16,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T20:29:16,108 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-18T20:29:16,110 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:29:16,110 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T20:29:16,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:16,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:17,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:17,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:18,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:18,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:19,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:19,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:20,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:29:20,049 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-18T20:29:20,049 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-18T20:29:20,057 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T20:29:20,057 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:20,065 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305., hostname=c0a89b2656d4,38945,1731961749068, seqNum=2] 2024-11-18T20:29:20,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:20,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:21,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:21,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:22,069 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 2024-11-18T20:29:22,069 WARN [ResponseProcessor for block BP-1523093664-172.17.0.2-1731961748415:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1523093664-172.17.0.2-1731961748415:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:22,069 WARN [ResponseProcessor for block BP-1523093664-172.17.0.2-1731961748415:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1523093664-172.17.0.2-1731961748415:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:22,070 WARN [ResponseProcessor for block BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:43537,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:22,070 WARN [DataStreamer for file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta block BP-1523093664-172.17.0.2-1731961748415:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1523093664-172.17.0.2-1731961748415:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK], DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK]) is bad. 
2024-11-18T20:29:22,070 WARN [DataStreamer for file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 block BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK], DatanodeInfoWithStorage[127.0.0.1:43537,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43537,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK]) is bad. 2024-11-18T20:29:22,070 WARN [PacketResponder: BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43537] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:22,070 WARN [DataStreamer for file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 block BP-1523093664-172.17.0.2-1731961748415:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1523093664-172.17.0.2-1731961748415:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43537,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK], DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43537,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK]) is bad. 
2024-11-18T20:29:22,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1561793553_22 at /127.0.0.1:51978 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51978 dst: /127.0.0.1:35643 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:22,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1621346920_22 at /127.0.0.1:51944 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51944 dst: /127.0.0.1:35643 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:29:22,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1621346920_22 at /127.0.0.1:40608 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40608 dst: /127.0.0.1:43537 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:22,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1561793553_22 at /127.0.0.1:40642 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40642 dst: /127.0.0.1:43537 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:22,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1561793553_22 at /127.0.0.1:40648 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40648 dst: /127.0.0.1:43537 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:22,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1561793553_22 at /127.0.0.1:51976 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51976 dst: /127.0.0.1:35643 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:29:22,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c412a06{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:22,074 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f6e2fed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:29:22,074 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:29:22,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f4d9d98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:29:22,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d8e0e5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,STOPPED} 2024-11-18T20:29:22,075 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:29:22,075 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:29:22,075 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1523093664-172.17.0.2-1731961748415 (Datanode Uuid 08e54dcc-24a4-4a85-9d3d-c8bf0af91401) service to localhost/127.0.0.1:40967 2024-11-18T20:29:22,075 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:29:22,076 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data3/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:22,076 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data4/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:22,076 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:29:22,089 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:22,093 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:29:22,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:29:22,096 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:29:22,096 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:29:22,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2146e1ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:29:22,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54a51f1b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:29:22,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26540011{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/java.io.tmpdir/jetty-localhost-45049-hadoop-hdfs-3_4_1-tests_jar-_-any-15479808588368965886/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:22,197 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cd8260b{HTTP/1.1, (http/1.1)}{localhost:45049} 2024-11-18T20:29:22,197 INFO [Time-limited test {}] server.Server(415): Started @160893ms 2024-11-18T20:29:22,198 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:29:22,214 WARN [ResponseProcessor for block BP-1523093664-172.17.0.2-1731961748415:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1523093664-172.17.0.2-1731961748415:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:22,214 WARN [ResponseProcessor for block BP-1523093664-172.17.0.2-1731961748415:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1523093664-172.17.0.2-1731961748415:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:22,214 WARN [ResponseProcessor for block BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:22,214 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1561793553_22 at /127.0.0.1:33746 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33746 dst: /127.0.0.1:35643 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:29:22,214 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1561793553_22 at /127.0.0.1:33756 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33756 dst: /127.0.0.1:35643 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:22,214 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1621346920_22 at /127.0.0.1:33766 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35643:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33766 dst: /127.0.0.1:35643 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:22,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2586409d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:22,218 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7447a5c6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:29:22,218 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:29:22,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e543aab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:29:22,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6cccfd06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,STOPPED} 2024-11-18T20:29:22,221 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:29:22,221 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:29:22,221 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1523093664-172.17.0.2-1731961748415 (Datanode Uuid 5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc) service to localhost/127.0.0.1:40967 2024-11-18T20:29:22,221 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:29:22,222 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data1/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:22,222 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data2/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:22,222 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:29:22,230 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:22,234 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:29:22,235 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:29:22,235 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:29:22,235 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:29:22,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5792ec07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:29:22,236 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b6134cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:29:22,264 WARN [Thread-1334 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:29:22,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d52516ca55ef67c with lease ID 0xb36dc9713e8dc621: from storage DS-0d785930-d55d-4048-9df4-3506ef4dab2e node DatanodeRegistration(127.0.0.1:43449, datanodeUuid=08e54dcc-24a4-4a85-9d3d-c8bf0af91401, infoPort=37501, infoSecurePort=0, ipcPort=42421, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:22,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d52516ca55ef67c with lease ID 0xb36dc9713e8dc621: from storage DS-e42198b1-8041-4e6f-8b38-0784ec7c740f node DatanodeRegistration(127.0.0.1:43449, datanodeUuid=08e54dcc-24a4-4a85-9d3d-c8bf0af91401, infoPort=37501, infoSecurePort=0, ipcPort=42421, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T20:29:22,330 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d417e68{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/java.io.tmpdir/jetty-localhost-43651-hadoop-hdfs-3_4_1-tests_jar-_-any-5669155449377426917/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:22,330 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41b5e377{HTTP/1.1, (http/1.1)}{localhost:43651} 2024-11-18T20:29:22,330 INFO [Time-limited test {}] server.Server(415): Started @161026ms 2024-11-18T20:29:22,331 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:29:22,393 WARN [Thread-1365 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:29:22,396 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c6613a7b15abbab with lease ID 0xb36dc9713e8dc622: from storage DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e node DatanodeRegistration(127.0.0.1:42771, datanodeUuid=5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc, infoPort=33315, infoSecurePort=0, ipcPort=33567, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:22,396 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c6613a7b15abbab with lease ID 0xb36dc9713e8dc622: from storage DS-9c6d7849-eb9b-4156-923e-96bcfcee6c97 node DatanodeRegistration(127.0.0.1:42771, datanodeUuid=5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc, infoPort=33315, infoSecurePort=0, ipcPort=33567, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:22,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:22,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:23,349 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-18T20:29:23,354 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-18T20:29:23,356 ERROR [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723-prefix:c0a89b2656d4,38945,1731961749068 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:29:23,357 WARN [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723-prefix:c0a89b2656d4,38945,1731961749068 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:23,357 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C38945%2C1731961749068:(num 1731961749461) roll requested 2024-11-18T20:29:23,357 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C38945%2C1731961749068.1731961763357 2024-11-18T20:29:23,364 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 newFile=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 2024-11-18T20:29:23,364 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:23,364 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:23,365 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:23,365 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:23,365 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:23,365 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 2024-11-18T20:29:23,365 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:29:23,366 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:23,366 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 2024-11-18T20:29:23,366 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37501:37501),(127.0.0.1/127.0.0.1:33315:33315)] 2024-11-18T20:29:23,366 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 is not closed yet, will try archiving it next time 2024-11-18T20:29:23,366 WARN [IPC Server handler 2 on default port 40967 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-18T20:29:23,367 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 after 1ms 2024-11-18T20:29:23,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:23,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:24,269 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T20:29:24,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:24,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:25,373 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-18T20:29:25,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:25,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:26,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:26,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:27,368 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 after 4002ms 2024-11-18T20:29:27,381 WARN [ResponseProcessor for block BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:42771,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:27,382 WARN [DataStreamer for file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 block BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43449,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK], DatanodeInfoWithStorage[127.0.0.1:42771,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42771,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]) is bad. 
2024-11-18T20:29:27,382 WARN [PacketResponder: BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42771] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:27,383 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1561793553_22 at /127.0.0.1:44018 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44018 dst: /127.0.0.1:43449 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:27,385 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1561793553_22 at /127.0.0.1:52302 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42771:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52302 dst: /127.0.0.1:42771 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:29:27,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d417e68{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-18T20:29:27,388 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41b5e377{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-18T20:29:27,388 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-18T20:29:27,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b6134cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-18T20:29:27,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5792ec07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,STOPPED}
2024-11-18T20:29:27,390 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-18T20:29:27,390 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-18T20:29:27,390 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1523093664-172.17.0.2-1731961748415 (Datanode Uuid 5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc) service to localhost/127.0.0.1:40967
2024-11-18T20:29:27,390 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-18T20:29:27,390 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data1/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-18T20:29:27,391 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data2/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-18T20:29:27,391 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-18T20:29:27,398 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:27,402 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:29:27,403 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:29:27,403 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:29:27,403 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:29:27,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e37a2ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:29:27,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3527dcb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:29:27,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e8ee736{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/java.io.tmpdir/jetty-localhost-37615-hadoop-hdfs-3_4_1-tests_jar-_-any-2924570162645062959/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:27,498 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@87fb467{HTTP/1.1, (http/1.1)}{localhost:37615} 2024-11-18T20:29:27,498 INFO [Time-limited test {}] server.Server(415): Started @166194ms 2024-11-18T20:29:27,500 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:29:27,517 WARN [ResponseProcessor for block BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:27,518 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1561793553_22 at /127.0.0.1:44032 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43449:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44032 dst: /127.0.0.1:43449 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:29:27,524 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26540011{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-18T20:29:27,524 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5cd8260b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-18T20:29:27,524 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-18T20:29:27,524 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54a51f1b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-18T20:29:27,525 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2146e1ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,STOPPED}
2024-11-18T20:29:27,526 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-18T20:29:27,526 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1523093664-172.17.0.2-1731961748415 (Datanode Uuid 08e54dcc-24a4-4a85-9d3d-c8bf0af91401) service to localhost/127.0.0.1:40967
2024-11-18T20:29:27,526 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data3/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-18T20:29:27,527 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data4/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-18T20:29:27,527 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-18T20:29:27,527 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-18T20:29:27,527 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-18T20:29:27,539 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-18T20:29:27,542 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-18T20:29:27,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-18T20:29:27,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-18T20:29:27,545 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-18T20:29:27,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3393bacb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,AVAILABLE}
2024-11-18T20:29:27,546 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f1c133c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-18T20:29:27,572 WARN [Thread-1408 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-18T20:29:27,574 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4dcefc7cad43e11 with lease ID 0xb36dc9713e8dc623: from storage DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e node DatanodeRegistration(127.0.0.1:36105, datanodeUuid=5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc, infoPort=45697, infoSecurePort=0, ipcPort=41653, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-18T20:29:27,574 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4dcefc7cad43e11 with lease ID 0xb36dc9713e8dc623: from storage DS-9c6d7849-eb9b-4156-923e-96bcfcee6c97 node DatanodeRegistration(127.0.0.1:36105, datanodeUuid=5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc, infoPort=45697, infoSecurePort=0, ipcPort=41653, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-18T20:29:27,640 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@48d4701a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/java.io.tmpdir/jetty-localhost-43855-hadoop-hdfs-3_4_1-tests_jar-_-any-12122000611650561918/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-18T20:29:27,640 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@730750d3{HTTP/1.1, (http/1.1)}{localhost:43855}
2024-11-18T20:29:27,641 INFO [Time-limited test {}] server.Server(415): Started @166336ms
2024-11-18T20:29:27,642 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-18T20:29:27,704 WARN [Thread-1439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:29:27,706 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc58bbd09312ec4ea with lease ID 0xb36dc9713e8dc624: from storage DS-0d785930-d55d-4048-9df4-3506ef4dab2e node DatanodeRegistration(127.0.0.1:36125, datanodeUuid=08e54dcc-24a4-4a85-9d3d-c8bf0af91401, infoPort=36819, infoSecurePort=0, ipcPort=42801, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T20:29:27,706 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc58bbd09312ec4ea with lease ID 0xb36dc9713e8dc624: from storage DS-e42198b1-8041-4e6f-8b38-0784ec7c740f node DatanodeRegistration(127.0.0.1:36125, datanodeUuid=08e54dcc-24a4-4a85-9d3d-c8bf0af91401, infoPort=36819, infoSecurePort=0, ipcPort=42801, storageInfo=lv=-57;cid=testClusterID;nsid=549276457;c=1731961748415), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:27,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:27,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:28,660 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-18T20:29:28,665 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-18T20:29:28,667 ERROR [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723-prefix:c0a89b2656d4,38945,1731961749068 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43449,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:29:28,667 WARN [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723-prefix:c0a89b2656d4,38945,1731961749068 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43449,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:28,667 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C38945%2C1731961749068:(num 1731961763357) roll requested 2024-11-18T20:29:28,668 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C38945%2C1731961749068.1731961768667 2024-11-18T20:29:28,674 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 newFile=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667 2024-11-18T20:29:28,675 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:28,675 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:28,675 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:28,675 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:28,675 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:28,675 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667 2024-11-18T20:29:28,675 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43449,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:29:28,676 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43449,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:28,676 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 2024-11-18T20:29:28,676 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45697:45697),(127.0.0.1/127.0.0.1:36819:36819)] 2024-11-18T20:29:28,676 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 is not closed yet, will try archiving it next time 2024-11-18T20:29:28,676 WARN [IPC Server handler 0 on default port 40967 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-18T20:29:28,676 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 after 0ms 2024-11-18T20:29:28,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:28,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:29,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:29,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:30,677 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C38945%2C1731961749068.1731961770677 2024-11-18T20:29:30,688 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667 newFile=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 2024-11-18T20:29:30,688 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:30,689 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:30,689 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:30,689 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:30,689 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:30,689 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 2024-11-18T20:29:30,690 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45697:45697),(127.0.0.1/127.0.0.1:36819:36819)] 2024-11-18T20:29:30,690 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 is not closed yet, will try archiving it next time 2024-11-18T20:29:30,691 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667 is not closed yet, will try archiving it next time 2024-11-18T20:29:30,691 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461
2024-11-18T20:29:30,691 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461
2024-11-18T20:29:30,691 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461 after 0ms
2024-11-18T20:29:30,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741838_1019 (size=1264)
2024-11-18T20:29:30,692 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461
2024-11-18T20:29:30,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741838_1019 (size=1264)
2024-11-18T20:29:30,700 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731961750376/Put/vlen=218/seqid=0]
2024-11-18T20:29:30,700 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731961760067/Put/vlen=1045/seqid=0]
2024-11-18T20:29:30,700 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961749461
2024-11-18T20:29:30,700 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357
2024-11-18T20:29:30,700 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357
2024-11-18T20:29:30,701 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 after 1ms
2024-11-18T20:29:30,701 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357
2024-11-18T20:29:30,705 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731961763356/Put/vlen=1045/seqid=0]
2024-11-18T20:29:30,705 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731961765377/Put/vlen=1045/seqid=0]
2024-11-18T20:29:30,705 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357
2024-11-18T20:29:30,705 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667
2024-11-18T20:29:30,705 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667
2024-11-18T20:29:30,705 WARN [IPC Server handler 2 on default port 40967 {}] namenode.FSNamesystem(3730): BLOCK* internalReleaseLease: All existing blocks are COMPLETE, lease removed, file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667 closed.
2024-11-18T20:29:30,706 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667 after 1ms
2024-11-18T20:29:30,706 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961768667
2024-11-18T20:29:30,710 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731961768667/Put/vlen=1045/seqid=0]
2024-11-18T20:29:30,710 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677
2024-11-18T20:29:30,710 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677
2024-11-18T20:29:30,711 WARN [IPC Server handler 4 on default port 40967 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021
2024-11-18T20:29:30,711 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 after 1ms
2024-11-18T20:29:30,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:30,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:31,094 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 is not closed yet, will try archiving it next time 2024-11-18T20:29:31,716 WARN [ResponseProcessor for block BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:31,716 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1621346920_22 at /127.0.0.1:57746 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36105:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57746 dst: /127.0.0.1:36105 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:36105 remote=/127.0.0.1:57746]. Total timeout mills is 60000, 58972 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:29:31,717 WARN [DataStreamer for file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 block BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36105,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK], DatanodeInfoWithStorage[127.0.0.1:36125,DS-0d785930-d55d-4048-9df4-3506ef4dab2e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36105,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]) is bad. 2024-11-18T20:29:31,717 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1621346920_22 at /127.0.0.1:38652 [Receiving block BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38652 dst: /127.0.0.1:36125 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
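The two DataXceiver errors and the DataStreamer "Error Recovery ... datanode 0 ... is bad" message above show the client trying to rebuild the write pipeline for the newest WAL (...1731961770677) at the same moment the test has asked the NameNode to recover the lease on that very file (the "RecoveryId = 1022 for block blk_1073741839_1021" warning earlier). The "Unexpected BlockUCState ... UNDER_RECOVERY but not UNDER_CONSTRUCTION" rejection that follows is the NameNode refusing updateBlockForPipeline for a block it is already recovering. Below is a minimal sketch of how that race can be provoked with public HDFS client calls only (create, hflush, DistributedFileSystem.recoverLease); the path, casts and error handling are illustrative assumptions, not the test's actual code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the lease-recovery vs. pipeline-recovery race seen in the log.
public final class LeaseRaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at a running HDFS (e.g. a test mini-cluster).
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);

    Path wal = new Path("/tmp/example-wal");           // illustrative path
    FSDataOutputStream out = dfs.create(wal, true);
    out.write(new byte[] {1, 2, 3});
    out.hflush();                                      // last block is UNDER_CONSTRUCTION

    // "Recovering" side: force lease recovery while the writer still has the stream open.
    boolean closed = dfs.recoverLease(wal);            // usually false on the first call
    System.out.println("lease recovered immediately: " + closed);

    try {
      out.write(new byte[] {4, 5, 6});
      out.hflush();                                    // the writer's pipeline recovery now
      out.close();                                     // races with block recovery and may fail
    } catch (IOException e) {
      System.out.println("old writer lost the race: " + e.getMessage());
    }
  }
}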
2024-11-18T20:29:31,720 WARN [DataStreamer for file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 block BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
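The "util.RecoverLeaseFSUtils(258): Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings that recur above and below this point all refer to WALs of the other mini-cluster (port 39387): the Close-WAL-Writer task keeps probing whether those files are closed after that cluster's DFS client has already been shut down, logs the failure, and polls again, which is why the same warning repeats roughly once a second. The stack frames show the probe going through reflection (RecoverLeaseFSUtils.isFileClosed wrapping an InvocationTargetException). The sketch below illustrates that kind of reflective probe; the class name and return-value handling are illustrative assumptions, not the actual HBase implementation.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: isFileClosed() exists on DistributedFileSystem but not on
// the generic FileSystem API, so it is looked up and invoked reflectively.
final class IsFileClosedProbe {
  static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // not HDFS, or method inaccessible: caller keeps polling recoverLease
    } catch (InvocationTargetException e) {
      // This is the path behind the WARN in the log: the wrapped cause is the
      // "java.io.IOException: Filesystem closed" thrown once the DFS client is shut down.
      // The caller logs it and polls again on the next attempt.
      return false;
    }
  }
}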
2024-11-18T20:29:31,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741839_1022 (size=85) 2024-11-18T20:29:31,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:31,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:32,679 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961763357 after 4003ms 2024-11-18T20:29:32,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:32,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:33,578 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T20:29:33,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T20:29:33,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-18T20:29:34,712 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 after 4002ms
2024-11-18T20:29:34,712 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677
2024-11-18T20:29:34,719 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677
2024-11-18T20:29:34,719 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a1f8ae07b8265e892cf120dd91f8c305 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB
2024-11-18T20:29:34,720 ERROR [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723-prefix:c0a89b2656d4,38945,1731961749068 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
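The appendAndSync IOException above means the current FSHLog writer can no longer sync to its block, so the next append is rejected ("append entry failed" just below) and a roll is requested on the regionserver's .logRoller thread a few records further down. The following generic sketch shows that "sync failed, mark the writer broken, ask for a roll" pattern; requestLogRoll and the field names are hypothetical, not HBase's actual API.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;

// Generic sketch of the failure handling visible in the log: once a sync on the
// current writer throws, stop using that writer and ask a roller to swap in a new one.
final class SyncFailureSketch {
  private final FSDataOutputStream current;
  private volatile boolean writerBroken;

  SyncFailureSketch(FSDataOutputStream current) {
    this.current = current;
  }

  void sync() {
    try {
      current.hflush();
    } catch (IOException e) {
      writerBroken = true;   // subsequent appends are rejected until the roll completes
      requestLogRoll(e);     // hypothetical hook; in the log this surfaces as
                             // "roll requested" on the .logRoller thread
    }
  }

  boolean isWriterBroken() {
    return writerBroken;
  }

  private void requestLogRoll(IOException cause) {
    // Placeholder: a real implementation would wake a roller thread here.
  }
}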
2024-11-18T20:29:34,720 WARN [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723-prefix:c0a89b2656d4,38945,1731961749068 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
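The roll itself returns quickly; closing the old writer is handed to a background "Close-WAL-Writer" task. That is why the "Failed to write trailer, non-fatal, continuing..." and "close old writer failed" records just below appear after the "Rolled WAL" line, and why the close path falls back to lease recovery (the AbstractFSWAL.recoverLease frames in the traces above). A small sketch of that asynchronous-close pattern follows; the class, thread name and the recovery callback are illustrative assumptions.

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch of the asynchronous old-writer close implied by the "Close-WAL-Writer-0"
// thread in the log; a close failure is tolerated and handed to a recovery fallback.
final class AsyncWriterClose {
  private final ExecutorService closeExecutor =
      Executors.newSingleThreadExecutor(r -> new Thread(r, "Close-WAL-Writer-0"));

  void closeOldWriter(Closeable oldWriter, Runnable recoverLeaseFallback) {
    closeExecutor.submit(() -> {
      try {
        oldWriter.close();                 // may fail to write the trailer, as in the log
      } catch (IOException e) {
        // Non-fatal: the roll already succeeded; fall back to lease recovery so the
        // NameNode eventually closes the abandoned file.
        recoverLeaseFallback.run();
      }
    });
  }
}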
2024-11-18T20:29:34,721 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C38945%2C1731961749068:(num 1731961770677) roll requested
2024-11-18T20:29:34,721 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C38945%2C1731961749068.1731961774721
2024-11-18T20:29:34,728 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 newFile=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961774721
2024-11-18T20:29:34,729 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:34,729 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:34,729 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:34,729 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:34,729 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:29:34,729 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961774721
2024-11-18T20:29:34,729 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:34,730 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1523093664-172.17.0.2-1731961748415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:34,730 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 2024-11-18T20:29:34,731 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 after 1ms 2024-11-18T20:29:34,733 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 to hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/oldWALs/c0a89b2656d4%2C38945%2C1731961749068.1731961770677 2024-11-18T20:29:34,733 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45697:45697),(127.0.0.1/127.0.0.1:36819:36819)] 2024-11-18T20:29:34,747 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/default/TestLogRolling-testLogRollOnPipelineRestart/a1f8ae07b8265e892cf120dd91f8c305/.tmp/info/e09150436d2c4fd89b86cda3de449503 is 1080, key is row1002/info:/1731961760067/Put/seqid=0 2024-11-18T20:29:34,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741841_1024 (size=9270) 2024-11-18T20:29:34,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741841_1024 (size=9270) 2024-11-18T20:29:34,755 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/default/TestLogRolling-testLogRollOnPipelineRestart/a1f8ae07b8265e892cf120dd91f8c305/.tmp/info/e09150436d2c4fd89b86cda3de449503 2024-11-18T20:29:34,762 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/default/TestLogRolling-testLogRollOnPipelineRestart/a1f8ae07b8265e892cf120dd91f8c305/.tmp/info/e09150436d2c4fd89b86cda3de449503 as hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/default/TestLogRolling-testLogRollOnPipelineRestart/a1f8ae07b8265e892cf120dd91f8c305/info/e09150436d2c4fd89b86cda3de449503 2024-11-18T20:29:34,768 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/default/TestLogRolling-testLogRollOnPipelineRestart/a1f8ae07b8265e892cf120dd91f8c305/info/e09150436d2c4fd89b86cda3de449503, entries=4, sequenceid=8, filesize=9.1 K 2024-11-18T20:29:34,769 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for a1f8ae07b8265e892cf120dd91f8c305 in 50ms, sequenceid=8, compaction requested=false 2024-11-18T20:29:34,769 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a1f8ae07b8265e892cf120dd91f8c305: 2024-11-18T20:29:34,769 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-18T20:29:34,769 ERROR [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723-prefix:c0a89b2656d4,38945,1731961749068.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:34,770 WARN [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723-prefix:c0a89b2656d4,38945,1731961749068.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:29:34,770 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C38945%2C1731961749068.meta:.meta(num 1731961749849) roll requested 2024-11-18T20:29:34,770 INFO [regionserver/c0a89b2656d4:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C38945%2C1731961749068.meta.1731961774770.meta 2024-11-18T20:29:34,775 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:34,775 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:34,775 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:34,775 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:34,775 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:34,775 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961774770.meta 2024-11-18T20:29:34,776 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:34,776 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:29:34,776 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta 2024-11-18T20:29:34,776 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45697:45697),(127.0.0.1/127.0.0.1:36819:36819)] 2024-11-18T20:29:34,776 DEBUG [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta is not closed yet, will try archiving it next time 2024-11-18T20:29:34,776 WARN [IPC Server handler 1 on default port 40967 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1014 2024-11-18T20:29:34,777 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta after 1ms 2024-11-18T20:29:34,790 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/.tmp/info/829e971bee41446d861b7f25ce7e6017 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305./info:regioninfo/1731961750381/Put/seqid=0 2024-11-18T20:29:34,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741843_1027 (size=7125) 2024-11-18T20:29:34,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741843_1027 (size=7125) 2024-11-18T20:29:34,795 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/.tmp/info/829e971bee41446d861b7f25ce7e6017 2024-11-18T20:29:34,813 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/.tmp/ns/72cf5b7025b74888a769b42ee75a83a2 is 43, key is default/ns:d/1731961749892/Put/seqid=0 2024-11-18T20:29:34,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741844_1028 (size=5153) 2024-11-18T20:29:34,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741844_1028 (size=5153) 2024-11-18T20:29:34,818 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/.tmp/ns/72cf5b7025b74888a769b42ee75a83a2 2024-11-18T20:29:34,838 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/.tmp/table/661bf17fd3d24aa288db0d59626d0c09 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731961750392/Put/seqid=0 2024-11-18T20:29:34,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741845_1029 (size=5438) 2024-11-18T20:29:34,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741845_1029 (size=5438) 2024-11-18T20:29:34,843 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/.tmp/table/661bf17fd3d24aa288db0d59626d0c09 2024-11-18T20:29:34,848 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/.tmp/info/829e971bee41446d861b7f25ce7e6017 as hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/info/829e971bee41446d861b7f25ce7e6017 2024-11-18T20:29:34,853 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/info/829e971bee41446d861b7f25ce7e6017, entries=10, sequenceid=11, filesize=7.0 K 2024-11-18T20:29:34,854 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/.tmp/ns/72cf5b7025b74888a769b42ee75a83a2 as hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/ns/72cf5b7025b74888a769b42ee75a83a2 2024-11-18T20:29:34,861 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/ns/72cf5b7025b74888a769b42ee75a83a2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T20:29:34,862 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/.tmp/table/661bf17fd3d24aa288db0d59626d0c09 as hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/table/661bf17fd3d24aa288db0d59626d0c09 2024-11-18T20:29:34,868 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/table/661bf17fd3d24aa288db0d59626d0c09, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T20:29:34,869 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-11-18T20:29:34,869 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T20:29:34,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:34,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:29:34,875 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T20:29:34,875 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:29:34,875 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:34,875 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:34,875 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-18T20:29:34,875 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:29:34,876 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=191438578, stopped=false 2024-11-18T20:29:34,876 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c0a89b2656d4,42061,1731961749016 2024-11-18T20:29:34,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:29:34,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:29:34,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:34,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:34,877 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:29:34,877 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:29:34,877 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:29:34,877 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:29:34,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:34,877 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:29:34,878 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0a89b2656d4,38945,1731961749068' ***** 2024-11-18T20:29:34,878 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:29:34,878 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:29:34,878 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:29:34,878 INFO [RS:0;c0a89b2656d4:38945 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:29:34,878 INFO [RS:0;c0a89b2656d4:38945 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:29:34,878 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(3091): Received CLOSE for a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:34,878 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(959): stopping server c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:34,878 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:29:34,879 INFO [RS:0;c0a89b2656d4:38945 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c0a89b2656d4:38945. 
2024-11-18T20:29:34,879 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a1f8ae07b8265e892cf120dd91f8c305, disabling compactions & flushes 2024-11-18T20:29:34,879 DEBUG [RS:0;c0a89b2656d4:38945 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:29:34,879 DEBUG [RS:0;c0a89b2656d4:38945 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:34,879 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:34,879 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:34,879 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:29:34,879 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. after waiting 0 ms 2024-11-18T20:29:34,879 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:29:34,879 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:34,879 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T20:29:34,879 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:29:34,879 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T20:29:34,879 DEBUG [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(1325): Online Regions={a1f8ae07b8265e892cf120dd91f8c305=TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T20:29:34,879 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:29:34,879 DEBUG [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a1f8ae07b8265e892cf120dd91f8c305 2024-11-18T20:29:34,879 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:29:34,879 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:29:34,879 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:29:34,879 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:29:34,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:34,883 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/default/TestLogRolling-testLogRollOnPipelineRestart/a1f8ae07b8265e892cf120dd91f8c305/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-18T20:29:34,883 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T20:29:34,884 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:34,884 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:29:34,884 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a1f8ae07b8265e892cf120dd91f8c305: Waiting for close lock at 1731961774879Running coprocessor pre-close hooks at 1731961774879Disabling compacts and flushes for region at 1731961774879Disabling writes for close at 1731961774879Writing region close event to WAL at 1731961774879Running coprocessor post-close hooks at 1731961774883 (+4 ms)Closed at 1731961774883 2024-11-18T20:29:34,884 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:29:34,884 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731961750005.a1f8ae07b8265e892cf120dd91f8c305. 2024-11-18T20:29:34,884 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961774879Running coprocessor pre-close hooks at 1731961774879Disabling compacts and flushes for region at 1731961774879Disabling writes for close at 1731961774879Writing region close event to WAL at 1731961774880 (+1 ms)Running coprocessor post-close hooks at 1731961774884 (+4 ms)Closed at 1731961774884 2024-11-18T20:29:34,884 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:29:35,079 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(976): stopping server c0a89b2656d4,38945,1731961749068; all regions closed. 
2024-11-18T20:29:35,080 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:35,080 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:35,080 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:35,080 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:35,080 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:35,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741842_1025 (size=825) 2024-11-18T20:29:35,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741842_1025 (size=825) 2024-11-18T20:29:35,329 INFO [regionserver/c0a89b2656d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:29:35,404 INFO [regionserver/c0a89b2656d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T20:29:35,405 INFO [regionserver/c0a89b2656d4:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T20:29:35,707 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T20:29:35,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:35,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:36,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:29:36,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:29:36,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T20:29:36,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:36,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:37,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:37,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:38,779 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta after 4002ms 2024-11-18T20:29:38,780 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/WALs/c0a89b2656d4,38945,1731961749068/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta to hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/oldWALs/c0a89b2656d4%2C38945%2C1731961749068.meta.1731961749849.meta 2024-11-18T20:29:38,786 DEBUG [RS:0;c0a89b2656d4:38945 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/oldWALs 2024-11-18T20:29:38,786 INFO [RS:0;c0a89b2656d4:38945 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C38945%2C1731961749068.meta:.meta(num 1731961774770) 2024-11-18T20:29:38,786 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,787 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,787 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,787 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,787 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741840_1023 (size=1162) 2024-11-18T20:29:38,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741840_1023 (size=1162) 2024-11-18T20:29:38,797 DEBUG [RS:0;c0a89b2656d4:38945 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/oldWALs 2024-11-18T20:29:38,797 INFO [RS:0;c0a89b2656d4:38945 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C38945%2C1731961749068:(num 1731961774721) 2024-11-18T20:29:38,797 DEBUG [RS:0;c0a89b2656d4:38945 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:38,797 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:29:38,798 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:29:38,798 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.ChoreService(370): Chore service for: regionserver/c0a89b2656d4:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T20:29:38,798 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:29:38,798 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:29:38,798 INFO [RS:0;c0a89b2656d4:38945 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38945 2024-11-18T20:29:38,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0a89b2656d4,38945,1731961749068 2024-11-18T20:29:38,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:29:38,800 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:29:38,801 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0a89b2656d4,38945,1731961749068] 2024-11-18T20:29:38,803 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0a89b2656d4,38945,1731961749068 already deleted, retry=false 2024-11-18T20:29:38,803 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0a89b2656d4,38945,1731961749068 expired; onlineServers=0 2024-11-18T20:29:38,803 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c0a89b2656d4,42061,1731961749016' ***** 2024-11-18T20:29:38,803 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:29:38,803 INFO [M:0;c0a89b2656d4:42061 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:29:38,803 INFO [M:0;c0a89b2656d4:42061 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:29:38,803 DEBUG [M:0;c0a89b2656d4:42061 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:29:38,803 DEBUG [M:0;c0a89b2656d4:42061 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:29:38,803 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:29:38,803 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961749228 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961749228,5,FailOnTimeoutGroup] 2024-11-18T20:29:38,803 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961749229 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961749229,5,FailOnTimeoutGroup] 2024-11-18T20:29:38,803 INFO [M:0;c0a89b2656d4:42061 {}] hbase.ChoreService(370): Chore service for: master/c0a89b2656d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:29:38,804 INFO [M:0;c0a89b2656d4:42061 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:29:38,804 DEBUG [M:0;c0a89b2656d4:42061 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:29:38,804 INFO [M:0;c0a89b2656d4:42061 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:29:38,804 INFO [M:0;c0a89b2656d4:42061 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:29:38,804 INFO [M:0;c0a89b2656d4:42061 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:29:38,804 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:29:38,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:29:38,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:38,805 DEBUG [M:0;c0a89b2656d4:42061 {}] zookeeper.ZKUtil(347): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:29:38,805 WARN [M:0;c0a89b2656d4:42061 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:29:38,806 INFO [M:0;c0a89b2656d4:42061 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/.lastflushedseqids 2024-11-18T20:29:38,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741846_1030 (size=130) 2024-11-18T20:29:38,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741846_1030 (size=130) 2024-11-18T20:29:38,812 INFO [M:0;c0a89b2656d4:42061 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:29:38,812 INFO [M:0;c0a89b2656d4:42061 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:29:38,812 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:29:38,812 INFO [M:0;c0a89b2656d4:42061 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:38,812 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:38,812 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:29:38,812 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:38,813 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-18T20:29:38,813 ERROR [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData-prefix:c0a89b2656d4,42061,1731961749016 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:38,813 WARN [FSHLog-0-hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData-prefix:c0a89b2656d4,42061,1731961749016 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:29:38,813 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c0a89b2656d4%2C42061%2C1731961749016:(num 1731961749159) roll requested 2024-11-18T20:29:38,813 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C42061%2C1731961749016.1731961778813 2024-11-18T20:29:38,818 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,819 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,819 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,819 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,819 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,819 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961778813 2024-11-18T20:29:38,820 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T20:29:38,820 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35643,DS-44af0ea9-e051-40d9-b4c4-83c6bbf5c94e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T20:29:38,820 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 2024-11-18T20:29:38,820 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45697:45697),(127.0.0.1/127.0.0.1:36819:36819)] 2024-11-18T20:29:38,820 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 is not closed yet, will try archiving it next time 2024-11-18T20:29:38,820 WARN [IPC Server handler 0 on default port 40967 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-18T20:29:38,821 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 after 1ms 2024-11-18T20:29:38,840 DEBUG [M:0;c0a89b2656d4:42061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/07f81030d2f343fca03811590fd98262 is 82, key is hbase:meta,,1/info:regioninfo/1731961749878/Put/seqid=0 2024-11-18T20:29:38,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741848_1033 (size=5672) 2024-11-18T20:29:38,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741848_1033 (size=5672) 2024-11-18T20:29:38,845 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/07f81030d2f343fca03811590fd98262 2024-11-18T20:29:38,864 DEBUG [M:0;c0a89b2656d4:42061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8eb843927360445c9929761ab77ca741 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961750397/Put/seqid=0 2024-11-18T20:29:38,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741849_1034 (size=6117) 2024-11-18T20:29:38,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741849_1034 (size=6117) 2024-11-18T20:29:38,869 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8eb843927360445c9929761ab77ca741 2024-11-18T20:29:38,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:38,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:38,889 DEBUG [M:0;c0a89b2656d4:42061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/db33af98d81647c5997c836b2c0f050e is 69, key is c0a89b2656d4,38945,1731961749068/rs:state/1731961749313/Put/seqid=0 2024-11-18T20:29:38,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741850_1035 (size=5156) 2024-11-18T20:29:38,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741850_1035 (size=5156) 2024-11-18T20:29:38,894 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/db33af98d81647c5997c836b2c0f050e 2024-11-18T20:29:38,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:38,902 INFO [RS:0;c0a89b2656d4:38945 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:29:38,902 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38945-0x10054865a730001, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:38,902 INFO [RS:0;c0a89b2656d4:38945 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0a89b2656d4,38945,1731961749068; zookeeper connection closed. 
2024-11-18T20:29:38,902 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@59938716 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@59938716 2024-11-18T20:29:38,902 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:29:38,913 DEBUG [M:0;c0a89b2656d4:42061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/249699ac12474bb8b6d7c75ae036d771 is 52, key is load_balancer_on/state:d/1731961750000/Put/seqid=0 2024-11-18T20:29:38,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741851_1036 (size=5056) 2024-11-18T20:29:38,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741851_1036 (size=5056) 2024-11-18T20:29:38,918 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/249699ac12474bb8b6d7c75ae036d771 2024-11-18T20:29:38,923 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/07f81030d2f343fca03811590fd98262 as hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/07f81030d2f343fca03811590fd98262 2024-11-18T20:29:38,928 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/07f81030d2f343fca03811590fd98262, entries=8, sequenceid=56, filesize=5.5 K 2024-11-18T20:29:38,929 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8eb843927360445c9929761ab77ca741 as hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8eb843927360445c9929761ab77ca741 2024-11-18T20:29:38,934 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8eb843927360445c9929761ab77ca741, entries=6, sequenceid=56, filesize=6.0 K 2024-11-18T20:29:38,935 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/db33af98d81647c5997c836b2c0f050e as hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/db33af98d81647c5997c836b2c0f050e 
2024-11-18T20:29:38,940 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/db33af98d81647c5997c836b2c0f050e, entries=1, sequenceid=56, filesize=5.0 K 2024-11-18T20:29:38,941 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/249699ac12474bb8b6d7c75ae036d771 as hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/249699ac12474bb8b6d7c75ae036d771 2024-11-18T20:29:38,946 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/249699ac12474bb8b6d7c75ae036d771, entries=1, sequenceid=56, filesize=4.9 K 2024-11-18T20:29:38,947 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=56, compaction requested=false 2024-11-18T20:29:38,948 INFO [M:0;c0a89b2656d4:42061 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:38,948 DEBUG [M:0;c0a89b2656d4:42061 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961778812Disabling compacts and flushes for region at 1731961778812Disabling writes for close at 1731961778812Obtaining lock to block concurrent updates at 1731961778813 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961778813Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731961778813Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731961778821 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961778821Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961778840 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961778840Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961778850 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961778864 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961778864Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961778874 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961778889 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961778889Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961778899 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961778912 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961778912Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fed46a: reopening flushed file at 1731961778923 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51d9f195: reopening flushed file at 1731961778928 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7090be90: reopening flushed file at 1731961778934 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5111b014: reopening flushed file at 1731961778940 (+6 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=56, compaction requested=false at 1731961778947 (+7 ms)Writing region close event to WAL at 1731961778948 (+1 ms)Closed at 1731961778948 2024-11-18T20:29:38,948 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,949 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,949 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,949 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,949 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:29:38,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741847_1031 (size=757) 2024-11-18T20:29:38,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36105 is added to blk_1073741847_1031 (size=757) 2024-11-18T20:29:38,993 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:29:39,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:39,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:39,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,914 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:39,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,427 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:29:40,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,432 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,433 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:29:40,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:40,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:41,710 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T20:29:41,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:41,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:42,822 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 after 4002ms 2024-11-18T20:29:42,823 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/WALs/c0a89b2656d4,42061,1731961749016/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 to hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/oldWALs/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 2024-11-18T20:29:42,830 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/MasterData/oldWALs/c0a89b2656d4%2C42061%2C1731961749016.1731961749159 to hdfs://localhost:40967/user/jenkins/test-data/3bfaad25-57f9-cf8e-9d66-49daf6a6d723/oldWALs/c0a89b2656d4%2C42061%2C1731961749016.1731961749159$masterlocalwal$ 2024-11-18T20:29:42,830 INFO [M:0;c0a89b2656d4:42061 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:29:42,830 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:29:42,831 INFO [M:0;c0a89b2656d4:42061 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42061 2024-11-18T20:29:42,831 INFO [M:0;c0a89b2656d4:42061 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:29:42,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:42,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:42,933 INFO [M:0;c0a89b2656d4:42061 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:29:42,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:42,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x10054865a730000, quorum=127.0.0.1:51396, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:29:42,938 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@48d4701a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:42,939 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@730750d3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:29:42,939 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:29:42,939 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f1c133c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:29:42,940 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3393bacb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,STOPPED} 2024-11-18T20:29:42,942 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:29:42,942 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1523093664-172.17.0.2-1731961748415 (Datanode Uuid 08e54dcc-24a4-4a85-9d3d-c8bf0af91401) service to localhost/127.0.0.1:40967 2024-11-18T20:29:42,943 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data3/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:42,944 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data4/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:42,944 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:29:42,944 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:29:42,944 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:29:42,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e8ee736{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:42,946 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@87fb467{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:29:42,946 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:29:42,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3527dcb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:29:42,946 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e37a2ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,STOPPED} 2024-11-18T20:29:42,947 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T20:29:42,947 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:29:42,947 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:29:42,947 WARN [BP-1523093664-172.17.0.2-1731961748415 heartbeating to localhost/127.0.0.1:40967 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1523093664-172.17.0.2-1731961748415 (Datanode Uuid 5bf929d5-3c92-4666-a3a6-a3bc1a9c0adc) service to localhost/127.0.0.1:40967 2024-11-18T20:29:42,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data1/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:42,948 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/cluster_c58938e3-9b6b-2547-701f-34d69eab66d8/data/data2/current/BP-1523093664-172.17.0.2-1731961748415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:29:42,948 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:29:42,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.w.WebAppContext@53f1ce1d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:29:42,953 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2024a4cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:29:42,953 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:29:42,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@521c98fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:29:42,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47f3561f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir/,STOPPED} 2024-11-18T20:29:42,959 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:29:42,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:29:42,987 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 153) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40967 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:40967 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:40967 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:40967 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40967 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:40967 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40967 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40967 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=28 (was 50), ProcessCount=11 (was 11), AvailableMemoryMB=2513 (was 2676) 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=28, ProcessCount=11, AvailableMemoryMB=2513 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.log.dir so I do NOT create it in target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ae12ee6d-669e-72d2-9ecd-1fd9f9ec9059/hadoop.tmp.dir so I do NOT create it in target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6, deleteOnExit=true 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/test.cache.data in system properties and HBase conf 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:29:42,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:29:42,995 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:29:42,995 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:29:42,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:29:42,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:29:43,009 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:29:43,053 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:43,057 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:29:43,058 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:29:43,058 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:29:43,058 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:29:43,059 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:43,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a789efc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:29:43,059 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4529c569{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:29:43,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@789c469a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/java.io.tmpdir/jetty-localhost-37207-hadoop-hdfs-3_4_1-tests_jar-_-any-14703257941268531276/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:29:43,151 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2413c723{HTTP/1.1, (http/1.1)}{localhost:37207} 2024-11-18T20:29:43,151 INFO [Time-limited test {}] server.Server(415): Started @181846ms 2024-11-18T20:29:43,162 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:29:43,195 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:43,199 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:29:43,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:29:43,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:29:43,200 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:29:43,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a649f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:29:43,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6fab2cd0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:29:43,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3adf1c78{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/java.io.tmpdir/jetty-localhost-33349-hadoop-hdfs-3_4_1-tests_jar-_-any-6479124907103059853/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:43,295 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@686f71a5{HTTP/1.1, (http/1.1)}{localhost:33349} 2024-11-18T20:29:43,295 INFO [Time-limited test {}] server.Server(415): Started @181990ms 2024-11-18T20:29:43,296 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:29:43,321 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:29:43,326 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:29:43,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:29:43,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:29:43,327 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:29:43,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@aa5bbf3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:29:43,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31884fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:29:43,360 WARN [Thread-1634 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/data/data2/current/BP-1002095311-172.17.0.2-1731961783019/current, will proceed with Du for space computation calculation, 2024-11-18T20:29:43,360 WARN [Thread-1633 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/data/data1/current/BP-1002095311-172.17.0.2-1731961783019/current, will proceed with Du for space computation calculation, 2024-11-18T20:29:43,374 WARN [Thread-1612 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:29:43,376 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f05f66c760356ac with lease ID 0x69b012dbb0474cab: Processing first storage report for DS-82df32c3-2b13-4e5b-9a25-c18cff36e6b8 from datanode DatanodeRegistration(127.0.0.1:43941, datanodeUuid=605857fb-13de-46f2-b3a1-74611633ec95, infoPort=45345, infoSecurePort=0, ipcPort=39307, storageInfo=lv=-57;cid=testClusterID;nsid=661370367;c=1731961783019) 2024-11-18T20:29:43,376 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f05f66c760356ac with lease ID 0x69b012dbb0474cab: from storage DS-82df32c3-2b13-4e5b-9a25-c18cff36e6b8 node DatanodeRegistration(127.0.0.1:43941, datanodeUuid=605857fb-13de-46f2-b3a1-74611633ec95, infoPort=45345, infoSecurePort=0, ipcPort=39307, storageInfo=lv=-57;cid=testClusterID;nsid=661370367;c=1731961783019), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:43,376 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5f05f66c760356ac with lease ID 0x69b012dbb0474cab: Processing first storage report for DS-fb9f1bb8-7377-43a6-8fe9-39d11a291d5e from datanode DatanodeRegistration(127.0.0.1:43941, datanodeUuid=605857fb-13de-46f2-b3a1-74611633ec95, infoPort=45345, infoSecurePort=0, ipcPort=39307, storageInfo=lv=-57;cid=testClusterID;nsid=661370367;c=1731961783019) 2024-11-18T20:29:43,377 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5f05f66c760356ac with lease ID 0x69b012dbb0474cab: from storage DS-fb9f1bb8-7377-43a6-8fe9-39d11a291d5e node DatanodeRegistration(127.0.0.1:43941, datanodeUuid=605857fb-13de-46f2-b3a1-74611633ec95, infoPort=45345, infoSecurePort=0, ipcPort=39307, storageInfo=lv=-57;cid=testClusterID;nsid=661370367;c=1731961783019), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:43,430 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2186ae4f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/java.io.tmpdir/jetty-localhost-37423-hadoop-hdfs-3_4_1-tests_jar-_-any-1125744948395494231/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:29:43,430 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e1439bb{HTTP/1.1, (http/1.1)}{localhost:37423} 2024-11-18T20:29:43,430 INFO [Time-limited test {}] server.Server(415): Started @182125ms 2024-11-18T20:29:43,431 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-18T20:29:43,485 WARN [Thread-1659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/data/data3/current/BP-1002095311-172.17.0.2-1731961783019/current, will proceed with Du for space computation calculation, 2024-11-18T20:29:43,485 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/data/data4/current/BP-1002095311-172.17.0.2-1731961783019/current, will proceed with Du for space computation calculation, 2024-11-18T20:29:43,503 WARN [Thread-1648 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T20:29:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x34d857d67e6586fb with lease ID 0x69b012dbb0474cac: Processing first storage report for DS-710121d5-b5ea-4565-99bf-c09edb65950d from datanode DatanodeRegistration(127.0.0.1:40163, datanodeUuid=a347b1fd-f7ac-4154-b483-208bc023db47, infoPort=40863, infoSecurePort=0, ipcPort=44133, storageInfo=lv=-57;cid=testClusterID;nsid=661370367;c=1731961783019) 2024-11-18T20:29:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x34d857d67e6586fb with lease ID 0x69b012dbb0474cac: from storage DS-710121d5-b5ea-4565-99bf-c09edb65950d node DatanodeRegistration(127.0.0.1:40163, datanodeUuid=a347b1fd-f7ac-4154-b483-208bc023db47, infoPort=40863, infoSecurePort=0, ipcPort=44133, storageInfo=lv=-57;cid=testClusterID;nsid=661370367;c=1731961783019), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x34d857d67e6586fb with lease ID 0x69b012dbb0474cac: Processing first storage report for DS-6f467c0e-31a2-4310-a780-44f0c68b7e72 from datanode DatanodeRegistration(127.0.0.1:40163, datanodeUuid=a347b1fd-f7ac-4154-b483-208bc023db47, infoPort=40863, infoSecurePort=0, ipcPort=44133, storageInfo=lv=-57;cid=testClusterID;nsid=661370367;c=1731961783019) 2024-11-18T20:29:43,505 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x34d857d67e6586fb with lease ID 0x69b012dbb0474cac: from storage DS-6f467c0e-31a2-4310-a780-44f0c68b7e72 node DatanodeRegistration(127.0.0.1:40163, datanodeUuid=a347b1fd-f7ac-4154-b483-208bc023db47, infoPort=40863, infoSecurePort=0, ipcPort=44133, storageInfo=lv=-57;cid=testClusterID;nsid=661370367;c=1731961783019), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:29:43,554 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4 2024-11-18T20:29:43,557 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/zookeeper_0, clientPort=53321, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:29:43,558 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53321 2024-11-18T20:29:43,558 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:43,559 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:43,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:29:43,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:29:43,570 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268 with version=8 2024-11-18T20:29:43,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/hbase-staging 2024-11-18T20:29:43,573 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:29:43,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:43,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:43,574 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:29:43,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:43,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:29:43,574 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:29:43,574 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:29:43,575 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44371 2024-11-18T20:29:43,577 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44371 connecting to ZooKeeper ensemble=127.0.0.1:53321 2024-11-18T20:29:43,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:443710x0, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:29:43,581 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44371-0x1005486e1730000 connected 2024-11-18T20:29:43,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:43,597 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:43,599 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:29:43,599 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268, hbase.cluster.distributed=false 2024-11-18T20:29:43,600 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:29:43,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44371 2024-11-18T20:29:43,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44371 2024-11-18T20:29:43,607 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44371 2024-11-18T20:29:43,608 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44371 2024-11-18T20:29:43,608 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44371 2024-11-18T20:29:43,622 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:29:43,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:43,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:43,622 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:29:43,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:29:43,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:29:43,622 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:29:43,622 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:29:43,623 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34539 2024-11-18T20:29:43,624 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34539 connecting to ZooKeeper ensemble=127.0.0.1:53321 2024-11-18T20:29:43,625 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:43,626 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:43,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:345390x0, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:29:43,630 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:345390x0, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:29:43,631 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34539-0x1005486e1730001 connected 2024-11-18T20:29:43,631 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:29:43,631 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:29:43,632 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:29:43,633 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:29:43,636 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34539 2024-11-18T20:29:43,636 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34539 2024-11-18T20:29:43,636 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34539 2024-11-18T20:29:43,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34539 2024-11-18T20:29:43,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34539 2024-11-18T20:29:43,648 
DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c0a89b2656d4:44371 2024-11-18T20:29:43,648 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c0a89b2656d4,44371,1731961783573 2024-11-18T20:29:43,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:29:43,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:29:43,650 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c0a89b2656d4,44371,1731961783573 2024-11-18T20:29:43,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:29:43,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,651 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:29:43,651 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c0a89b2656d4,44371,1731961783573 from backup master directory 2024-11-18T20:29:43,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c0a89b2656d4,44371,1731961783573 2024-11-18T20:29:43,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:29:43,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:29:43,652 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T20:29:43,652 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c0a89b2656d4,44371,1731961783573 2024-11-18T20:29:43,658 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/hbase.id] with ID: f9adb26b-3aa2-4ace-8980-9a3061a4d119 2024-11-18T20:29:43,658 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/.tmp/hbase.id 2024-11-18T20:29:43,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:29:43,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:29:43,667 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/.tmp/hbase.id]:[hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/hbase.id] 2024-11-18T20:29:43,678 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:43,678 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:29:43,679 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-18T20:29:43,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:29:43,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:29:43,692 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:29:43,693 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:29:43,701 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:29:43,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:29:43,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:29:43,710 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store 2024-11-18T20:29:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:29:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:29:43,719 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:43,719 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:29:43,720 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:43,720 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:43,720 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:29:43,720 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:29:43,720 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:29:43,720 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961783719Disabling compacts and flushes for region at 1731961783719Disabling writes for close at 1731961783720 (+1 ms)Writing region close event to WAL at 1731961783720Closed at 1731961783720 2024-11-18T20:29:43,721 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/.initializing 2024-11-18T20:29:43,721 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/WALs/c0a89b2656d4,44371,1731961783573 2024-11-18T20:29:43,723 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C44371%2C1731961783573, suffix=, logDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/WALs/c0a89b2656d4,44371,1731961783573, archiveDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/oldWALs, maxLogs=10 2024-11-18T20:29:43,723 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C44371%2C1731961783573.1731961783723 2024-11-18T20:29:43,728 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/WALs/c0a89b2656d4,44371,1731961783573/c0a89b2656d4%2C44371%2C1731961783573.1731961783723 2024-11-18T20:29:43,728 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45345:45345),(127.0.0.1/127.0.0.1:40863:40863)] 2024-11-18T20:29:43,733 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:29:43,733 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:43,733 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,733 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,734 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:29:43,736 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:43,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:43,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,737 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:29:43,737 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:43,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:29:43,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:29:43,739 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:43,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:29:43,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:29:43,741 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:43,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:29:43,741 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,742 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,742 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,743 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,743 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,744 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:29:43,745 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:29:43,747 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:29:43,748 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709164, jitterRate=-0.09825138747692108}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:29:43,748 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961783733Initializing all the Stores at 1731961783734 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961783734Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961783734Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961783734Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961783734Cleaning up temporary data from old regions at 1731961783743 (+9 ms)Region opened successfully at 1731961783748 (+5 ms) 2024-11-18T20:29:43,749 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:29:43,752 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a07e8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:29:43,753 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:29:43,753 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:29:43,753 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:29:43,753 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:29:43,754 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:29:43,754 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:29:43,754 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:29:43,756 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:29:43,757 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:29:43,757 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:29:43,758 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:29:43,759 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:29:43,759 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:29:43,760 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:29:43,761 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:29:43,761 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:29:43,762 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:29:43,763 DEBUG 
[master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:29:43,765 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:29:43,765 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:29:43,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:29:43,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:29:43,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,767 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c0a89b2656d4,44371,1731961783573, sessionid=0x1005486e1730000, setting cluster-up flag (Was=false) 2024-11-18T20:29:43,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,771 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:29:43,772 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,44371,1731961783573 2024-11-18T20:29:43,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:43,777 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:29:43,778 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,44371,1731961783573 2024-11-18T20:29:43,779 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:29:43,780 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:29:43,780 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:29:43,780 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:29:43,781 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c0a89b2656d4,44371,1731961783573 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:29:43,782 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:29:43,782 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:29:43,782 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:29:43,782 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:29:43,782 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c0a89b2656d4:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:29:43,782 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,782 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:29:43,782 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:29:43,783 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961813783 2024-11-18T20:29:43,783 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:29:43,783 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:29:43,783 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:29:43,783 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:29:43,783 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:29:43,783 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:29:43,783 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:29:43,783 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:29:43,784 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:43,784 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:29:43,784 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:29:43,784 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:29:43,784 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:29:43,784 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:29:43,784 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:43,784 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961783784,5,FailOnTimeoutGroup] 2024-11-18T20:29:43,785 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961783785,5,FailOnTimeoutGroup] 2024-11-18T20:29:43,785 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-18T20:29:43,785 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:29:43,785 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:43,785 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:29:43,785 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T20:29:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:29:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:29:43,791 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:29:43,791 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268 2024-11-18T20:29:43,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:29:43,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:29:43,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:43,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:29:43,798 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:29:43,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:43,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:43,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:29:43,800 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:29:43,800 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:43,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:43,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:29:43,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:29:43,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:43,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:43,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:29:43,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:29:43,803 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:43,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:43,804 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:29:43,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740 2024-11-18T20:29:43,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740 2024-11-18T20:29:43,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:29:43,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:29:43,807 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
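The CompactionConfiguration(183) entries above dump the effective store-compaction settings applied to each column family of hbase:meta. As a hedged illustration only (not part of the test log), the sketch below maps the printed values onto the standard HBase configuration keys they normally come from; the class name and the use of HBaseConfiguration are assumptions for demonstration.

```java
// Minimal sketch, assuming the standard HBase property names; the values mirror
// the CompactionConfiguration output above and are not taken from the test itself.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period (7 days, in ms)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter 0.500000
    return conf;
  }
}
```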
2024-11-18T20:29:43,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:29:43,810 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:29:43,810 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762620, jitterRate=-0.03027881681919098}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:29:43,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961783796Initializing all the Stores at 1731961783797 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961783797Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961783797Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961783797Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961783797Cleaning up temporary data from old regions at 1731961783806 (+9 ms)Region opened successfully at 1731961783811 (+5 ms) 2024-11-18T20:29:43,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:29:43,811 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:29:43,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:29:43,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:29:43,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:29:43,813 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:29:43,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961783811Disabling compacts and flushes for region at 1731961783811Disabling writes for close at 1731961783811Writing region close 
event to WAL at 1731961783813 (+2 ms)Closed at 1731961783813 2024-11-18T20:29:43,815 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:29:43,815 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:29:43,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:29:43,816 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:29:43,818 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:29:43,840 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(746): ClusterId : f9adb26b-3aa2-4ace-8980-9a3061a4d119 2024-11-18T20:29:43,840 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:29:43,843 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:29:43,843 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:29:43,845 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:29:43,846 DEBUG [RS:0;c0a89b2656d4:34539 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ff809ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:29:43,862 DEBUG [RS:0;c0a89b2656d4:34539 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c0a89b2656d4:34539 2024-11-18T20:29:43,862 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:29:43,862 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:29:43,862 DEBUG [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T20:29:43,863 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0a89b2656d4,44371,1731961783573 with port=34539, startcode=1731961783622 2024-11-18T20:29:43,863 DEBUG [RS:0;c0a89b2656d4:34539 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:29:43,865 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49885, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:29:43,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44371 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:43,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44371 {}] master.ServerManager(517): Registering regionserver=c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:43,866 DEBUG [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268 2024-11-18T20:29:43,866 DEBUG [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40257 2024-11-18T20:29:43,866 DEBUG [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:29:43,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:29:43,868 DEBUG [RS:0;c0a89b2656d4:34539 {}] zookeeper.ZKUtil(111): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:43,868 WARN [RS:0;c0a89b2656d4:34539 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:29:43,868 INFO [RS:0;c0a89b2656d4:34539 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:29:43,868 DEBUG [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:43,869 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0a89b2656d4,34539,1731961783622] 2024-11-18T20:29:43,871 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:29:43,873 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:29:43,873 INFO [RS:0;c0a89b2656d4:34539 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:29:43,873 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
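The MemStoreFlusher(131) line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. As a hedged sketch only: these limits are conventionally the region-server heap scaled by hbase.regionserver.global.memstore.size and then by hbase.regionserver.global.memstore.size.lower.limit. The 2200 MB heap below is an assumption chosen so the arithmetic reproduces the logged values; it is not a figure taken from the log.

```java
// Hedged sketch of the arithmetic behind the MemStoreFlusher limits logged above.
// heapMb is assumed (not logged); the two fractions are the usual HBase defaults.
public class MemStoreLimitSketch {
  public static void main(String[] args) {
    double heapMb = 2200.0;            // assumed region-server heap size
    double globalFraction = 0.4;       // hbase.regionserver.global.memstore.size
    double lowerLimitFraction = 0.95;  // hbase.regionserver.global.memstore.size.lower.limit

    double globalLimitMb = heapMb * globalFraction;         // 880 MB, as logged
    double lowMarkMb = globalLimitMb * lowerLimitFraction;  // 836 MB, as logged
    System.out.printf("globalMemStoreLimit=%.0f M, lowMark=%.0f M%n", globalLimitMb, lowMarkMb);
  }
}
```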
2024-11-18T20:29:43,873 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:29:43,874 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:29:43,874 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:43,874 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,874 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,874 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,874 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,874 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,874 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:29:43,875 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,875 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,875 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,875 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,875 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,875 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:29:43,875 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:29:43,875 DEBUG [RS:0;c0a89b2656d4:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:29:43,876 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T20:29:43,876 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:43,876 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:43,876 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:43,876 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:43,876 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34539,1731961783622-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:29:43,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:43,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:43,892 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:29:43,892 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,34539,1731961783622-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:43,892 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:43,892 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.Replication(171): c0a89b2656d4,34539,1731961783622 started 2024-11-18T20:29:43,904 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:29:43,904 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(1482): Serving as c0a89b2656d4,34539,1731961783622, RpcServer on c0a89b2656d4/172.17.0.2:34539, sessionid=0x1005486e1730001 2024-11-18T20:29:43,904 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:29:43,904 DEBUG [RS:0;c0a89b2656d4:34539 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:43,904 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,34539,1731961783622' 2024-11-18T20:29:43,904 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:29:43,905 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:29:43,906 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:29:43,906 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:29:43,906 DEBUG [RS:0;c0a89b2656d4:34539 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:43,906 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,34539,1731961783622' 2024-11-18T20:29:43,906 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:29:43,906 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:29:43,906 DEBUG [RS:0;c0a89b2656d4:34539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:29:43,906 INFO [RS:0;c0a89b2656d4:34539 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:29:43,906 INFO [RS:0;c0a89b2656d4:34539 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:29:43,968 WARN [c0a89b2656d4:44371 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-18T20:29:44,010 INFO [RS:0;c0a89b2656d4:34539 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C34539%2C1731961783622, suffix=, logDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622, archiveDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/oldWALs, maxLogs=32 2024-11-18T20:29:44,011 INFO [RS:0;c0a89b2656d4:34539 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C34539%2C1731961783622.1731961784011 2024-11-18T20:29:44,022 INFO [RS:0;c0a89b2656d4:34539 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961784011 2024-11-18T20:29:44,023 DEBUG [RS:0;c0a89b2656d4:34539 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45345:45345),(127.0.0.1/127.0.0.1:40863:40863)] 2024-11-18T20:29:44,218 DEBUG [c0a89b2656d4:44371 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:29:44,219 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:44,223 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,34539,1731961783622, state=OPENING 2024-11-18T20:29:44,225 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:29:44,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:44,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:29:44,226 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:29:44,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,34539,1731961783622}] 2024-11-18T20:29:44,227 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:29:44,227 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:29:44,381 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:29:44,386 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46647, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:29:44,391 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:29:44,391 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:29:44,393 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C34539%2C1731961783622.meta, suffix=.meta, logDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622, archiveDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/oldWALs, maxLogs=32 2024-11-18T20:29:44,393 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C34539%2C1731961783622.meta.1731961784393.meta 2024-11-18T20:29:44,397 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.meta.1731961784393.meta 2024-11-18T20:29:44,400 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40863:40863),(127.0.0.1/127.0.0.1:45345:45345)] 2024-11-18T20:29:44,404 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:29:44,405 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:29:44,405 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:29:44,405 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
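Both "WAL configuration" entries above (the region-server WAL at 20:29:44,010 and the meta WAL at 20:29:44,393) report blocksize=256 MB, rollsize=128 MB and maxLogs=32. As a hedged illustration, the roll size is conventionally the WAL block size scaled by the log-roll multiplier; the sketch below assumes the standard property names and simply reproduces that relationship, it is not code from the test.

```java
// Hedged sketch: roll size = block size * log-roll multiplier, matching the
// "WAL configuration: blocksize=256 MB, rollsize=128 MB ... maxLogs=32" lines above.
// Property names in the comments are the standard HBase keys; values are assumed.
public class WalRollSizeSketch {
  public static void main(String[] args) {
    long blockSizeBytes = 256L * 1024 * 1024; // hbase.regionserver.hlog.blocksize
    double rollMultiplier = 0.5;              // hbase.regionserver.logroll.multiplier
    int maxLogs = 32;                         // hbase.regionserver.maxlogs

    long rollSizeBytes = (long) (blockSizeBytes * rollMultiplier); // 128 MB, as logged
    System.out.println("rollsize=" + (rollSizeBytes >> 20) + " MB, maxLogs=" + maxLogs);
  }
}
```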
2024-11-18T20:29:44,405 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:29:44,405 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:44,405 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:29:44,405 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:29:44,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:29:44,407 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:29:44,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:44,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:44,408 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:29:44,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:29:44,409 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:44,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:44,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:29:44,410 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:29:44,410 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:44,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:29:44,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:29:44,411 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:29:44,411 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:44,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T20:29:44,411 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:29:44,412 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740 2024-11-18T20:29:44,413 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740 2024-11-18T20:29:44,414 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:29:44,414 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:29:44,414 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:29:44,416 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:29:44,416 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=773641, jitterRate=-0.0162656307220459}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:29:44,416 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:29:44,417 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961784405Writing region info on filesystem at 1731961784405Initializing all the Stores at 1731961784406 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961784406Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961784406Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961784406Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961784406Cleaning up temporary data from old regions at 1731961784414 (+8 ms)Running coprocessor post-open hooks at 1731961784416 (+2 ms)Region opened successfully at 1731961784417 (+1 ms) 2024-11-18T20:29:44,418 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961784381 2024-11-18T20:29:44,420 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:29:44,420 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:29:44,421 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:44,422 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,34539,1731961783622, state=OPEN 2024-11-18T20:29:44,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:29:44,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:29:44,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:29:44,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:29:44,424 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:44,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:29:44,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,34539,1731961783622 in 197 msec 2024-11-18T20:29:44,429 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:29:44,429 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-18T20:29:44,430 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:29:44,430 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:29:44,431 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:29:44,431 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,34539,1731961783622, seqNum=-1] 2024-11-18T20:29:44,432 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:29:44,433 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34417, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:29:44,439 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 658 msec 2024-11-18T20:29:44,439 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961784439, completionTime=-1 2024-11-18T20:29:44,439 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:29:44,440 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:29:44,441 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:29:44,441 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961844441 2024-11-18T20:29:44,441 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961904441 2024-11-18T20:29:44,442 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T20:29:44,442 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,44371,1731961783573-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:44,442 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,44371,1731961783573-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:44,442 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,44371,1731961783573-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:44,442 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c0a89b2656d4:44371, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:29:44,442 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:44,444 DEBUG [master/c0a89b2656d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:29:44,444 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:29:44,447 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.795sec 2024-11-18T20:29:44,447 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:29:44,447 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:29:44,447 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:29:44,447 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T20:29:44,447 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:29:44,448 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,44371,1731961783573-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:29:44,448 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,44371,1731961783573-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:29:44,451 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:29:44,451 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:29:44,451 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,44371,1731961783573-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T20:29:44,540 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15cc44b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:29:44,540 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c0a89b2656d4,44371,-1 for getting cluster id 2024-11-18T20:29:44,540 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:29:44,542 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f9adb26b-3aa2-4ace-8980-9a3061a4d119' 2024-11-18T20:29:44,542 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:29:44,542 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f9adb26b-3aa2-4ace-8980-9a3061a4d119" 2024-11-18T20:29:44,543 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23273074, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:29:44,543 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c0a89b2656d4,44371,-1] 2024-11-18T20:29:44,543 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:29:44,543 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:29:44,545 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33200, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:29:44,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5afe5563, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:29:44,547 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:29:44,548 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,34539,1731961783622, seqNum=-1] 2024-11-18T20:29:44,548 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:29:44,550 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48572, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:29:44,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c0a89b2656d4,44371,1731961783573 2024-11-18T20:29:44,552 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:29:44,555 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:29:44,556 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:29:44,557 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is c0a89b2656d4,44371,1731961783573 2024-11-18T20:29:44,557 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@46a5923 2024-11-18T20:29:44,557 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:29:44,558 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33216, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:29:44,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:29:44,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-18T20:29:44,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:29:44,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:29:44,562 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:29:44,562 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:44,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-18T20:29:44,563 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:29:44,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:29:44,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741835_1011 (size=405) 2024-11-18T20:29:44,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741835_1011 (size=405) 2024-11-18T20:29:44,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:44,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:44,976 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 01f6d0ddf5a713fb68de04ee6d52b2a5, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268 2024-11-18T20:29:44,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741836_1012 (size=88) 2024-11-18T20:29:44,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741836_1012 (size=88) 2024-11-18T20:29:44,990 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:44,990 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 01f6d0ddf5a713fb68de04ee6d52b2a5, disabling compactions & 
flushes 2024-11-18T20:29:44,990 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 2024-11-18T20:29:44,990 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 2024-11-18T20:29:44,990 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. after waiting 0 ms 2024-11-18T20:29:44,990 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 2024-11-18T20:29:44,990 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 2024-11-18T20:29:44,990 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 01f6d0ddf5a713fb68de04ee6d52b2a5: Waiting for close lock at 1731961784990Disabling compacts and flushes for region at 1731961784990Disabling writes for close at 1731961784990Writing region close event to WAL at 1731961784990Closed at 1731961784990 2024-11-18T20:29:44,993 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:29:44,993 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731961784993"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961784993"}]},"ts":"1731961784993"} 2024-11-18T20:29:44,996 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T20:29:44,998 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:29:44,998 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961784998"}]},"ts":"1731961784998"} 2024-11-18T20:29:45,000 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-18T20:29:45,001 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=01f6d0ddf5a713fb68de04ee6d52b2a5, ASSIGN}] 2024-11-18T20:29:45,003 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=01f6d0ddf5a713fb68de04ee6d52b2a5, ASSIGN 2024-11-18T20:29:45,004 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=01f6d0ddf5a713fb68de04ee6d52b2a5, ASSIGN; state=OFFLINE, location=c0a89b2656d4,34539,1731961783622; forceNewPlan=false, retain=false 2024-11-18T20:29:45,155 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=01f6d0ddf5a713fb68de04ee6d52b2a5, regionState=OPENING, regionLocation=c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:45,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=01f6d0ddf5a713fb68de04ee6d52b2a5, ASSIGN because future has completed 2024-11-18T20:29:45,163 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 01f6d0ddf5a713fb68de04ee6d52b2a5, server=c0a89b2656d4,34539,1731961783622}] 2024-11-18T20:29:45,323 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 
2024-11-18T20:29:45,324 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 01f6d0ddf5a713fb68de04ee6d52b2a5, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:29:45,324 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,324 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:29:45,324 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,324 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,326 INFO [StoreOpener-01f6d0ddf5a713fb68de04ee6d52b2a5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,329 INFO [StoreOpener-01f6d0ddf5a713fb68de04ee6d52b2a5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 01f6d0ddf5a713fb68de04ee6d52b2a5 columnFamilyName info 2024-11-18T20:29:45,329 DEBUG [StoreOpener-01f6d0ddf5a713fb68de04ee6d52b2a5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:29:45,330 INFO [StoreOpener-01f6d0ddf5a713fb68de04ee6d52b2a5-1 {}] regionserver.HStore(327): Store=01f6d0ddf5a713fb68de04ee6d52b2a5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:29:45,330 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,331 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,332 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,333 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,333 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,336 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,339 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:29:45,340 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 01f6d0ddf5a713fb68de04ee6d52b2a5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=860039, jitterRate=0.09359675645828247}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:29:45,340 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:29:45,341 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 01f6d0ddf5a713fb68de04ee6d52b2a5: Running coprocessor pre-open hook at 1731961785324Writing region info on filesystem at 1731961785324Initializing all the Stores at 1731961785326 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961785326Cleaning up temporary data from old regions at 1731961785333 (+7 ms)Running coprocessor post-open hooks at 1731961785340 (+7 ms)Region opened successfully at 1731961785341 (+1 ms) 2024-11-18T20:29:45,343 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5., pid=6, masterSystemTime=1731961785318 2024-11-18T20:29:45,345 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 2024-11-18T20:29:45,346 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 2024-11-18T20:29:45,347 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=01f6d0ddf5a713fb68de04ee6d52b2a5, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,34539,1731961783622 2024-11-18T20:29:45,348 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44371 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=c0a89b2656d4,34539,1731961783622, table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=01f6d0ddf5a713fb68de04ee6d52b2a5. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-18T20:29:45,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 01f6d0ddf5a713fb68de04ee6d52b2a5, server=c0a89b2656d4,34539,1731961783622 because future has completed 2024-11-18T20:29:45,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T20:29:45,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 01f6d0ddf5a713fb68de04ee6d52b2a5, server=c0a89b2656d4,34539,1731961783622 in 188 msec 2024-11-18T20:29:45,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T20:29:45,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=01f6d0ddf5a713fb68de04ee6d52b2a5, ASSIGN in 353 msec 2024-11-18T20:29:45,357 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T20:29:45,357 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961785357"}]},"ts":"1731961785357"} 2024-11-18T20:29:45,360 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-18T20:29:45,361 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T20:29:45,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 802 msec 2024-11-18T20:29:45,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:45,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-18T20:29:46,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-18T20:29:46,106 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-18T20:29:46,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-18T20:29:46,108 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-18T20:29:46,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T20:29:46,108 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-18T20:29:49,910 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-18T20:29:49,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-18T20:29:49,943 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-18T20:29:49,943 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:53,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:53,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:54,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:29:54,608 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T20:29:54,608 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-18T20:29:54,614 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:29:54,614 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 
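For reference, the client-side calls behind the entries just above (poll until the CREATE procedure finishes, then enumerate the new table's regions via META) look roughly like the sketch below. This is an illustration assembled from the log, not the test's actual source; the Configuration defaults, connection setup, and the 'info' column family (taken from the flush entries later in this log) are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import java.util.List;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // createTable() returns once the master's create-table procedure completes,
      // which is what the repeated "Checking to see if procedure is done" polling reflects.
      admin.createTable(TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build());
      // Listing the regions afterwards corresponds to the META scan in the log
      // that reported a single region for the new table.
      List<RegionInfo> regions = admin.getRegions(tn);
      System.out.println("regions for " + tn + ": " + regions.size());
    }
  }
}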
2024-11-18T20:29:54,618 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5., hostname=c0a89b2656d4,34539,1731961783622, seqNum=2] 2024-11-18T20:29:54,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:29:54,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:29:54,632 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T20:29:54,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-18T20:29:54,634 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T20:29:54,635 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T20:29:54,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-18T20:29:54,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 
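The flush sequence above is driven from the client side: a put against 'row0001' triggers the region lookup logged by AsyncNonMetaRegionLocator, and an Admin flush request is what appears on the master as FlushTableProcedure pid=7 fanning out one FlushRegionProcedure (pid=8) per region. A minimal sketch, assuming a standard client Connection; the column qualifier and value are placeholders, since the log only shows the row key and the 'info' family.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Writing 'row0001' forces the client to locate the owning region,
      // the lookup logged above by AsyncNonMetaRegionLocator.
      table.put(new Put(Bytes.toBytes("row0001"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value")));
      // Asks the master to flush the table; in this log that request shows up as
      // FlushTableProcedure pid=7 with a FlushRegionProcedure subprocedure per region.
      admin.flush(tn);
    }
  }
}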
2024-11-18T20:29:54,802 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 01f6d0ddf5a713fb68de04ee6d52b2a5 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T20:29:54,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/e12fe8979e8045e896b8880e80ba9a15 is 1080, key is row0001/info:/1731961794619/Put/seqid=0 2024-11-18T20:29:54,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741837_1013 (size=6033) 2024-11-18T20:29:54,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741837_1013 (size=6033) 2024-11-18T20:29:54,829 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/e12fe8979e8045e896b8880e80ba9a15 2024-11-18T20:29:54,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/e12fe8979e8045e896b8880e80ba9a15 as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/e12fe8979e8045e896b8880e80ba9a15 2024-11-18T20:29:54,840 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/e12fe8979e8045e896b8880e80ba9a15, entries=1, sequenceid=5, filesize=5.9 K 2024-11-18T20:29:54,841 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 01f6d0ddf5a713fb68de04ee6d52b2a5 in 38ms, sequenceid=5, compaction requested=false 2024-11-18T20:29:54,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 01f6d0ddf5a713fb68de04ee6d52b2a5: 2024-11-18T20:29:54,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 
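The regionserver entries above show the memstore being written to a temporary HFile under the region's .tmp/info directory and then committed into the info store (the HRegionFileSystem "Committing ... as ..." line). Below is a rough sketch of that write-to-temp-then-rename pattern using only the Hadoop FileSystem API; the local file:// paths and the one-byte payload are placeholders, and HBase's own commit path does considerably more bookkeeping than this.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TempThenCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder region directory; in the log this lives under
    // hdfs://localhost:40257/.../data/default/<table>/<region>.
    Path regionDir = new Path("file:///tmp/region-dir-sketch");
    Path tmpFile = new Path(regionDir, ".tmp/info/e12fe8979e8045e896b8880e80ba9a15");
    Path storeFile = new Path(regionDir, "info/e12fe8979e8045e896b8880e80ba9a15");

    FileSystem fs = regionDir.getFileSystem(conf);
    // Write the full contents to the temporary location first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(new byte[] { 0 }); // stand-in for the flushed HFile bytes
    }
    // Commit by rename, so readers only ever see a complete file in the store directory.
    fs.mkdirs(storeFile.getParent());
    if (!fs.rename(tmpFile, storeFile)) {
      throw new java.io.IOException("rename failed: " + tmpFile + " -> " + storeFile);
    }
  }
}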
2024-11-18T20:29:54,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-18T20:29:54,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-18T20:29:54,848 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-18T20:29:54,848 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-11-18T20:29:54,850 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 222 msec 2024-11-18T20:29:54,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:54,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:55,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:55,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:56,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:56,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:57,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:57,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:58,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:58,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:29:59,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:29:59,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:00,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:00,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:01,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:01,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:02,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:02,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:03,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:03,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:04,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-18T20:30:04,707 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T20:30:04,710 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:30:04,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:30:04,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-18T20:30:04,712 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T20:30:04,713 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T20:30:04,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T20:30:04,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-18T20:30:04,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 
2024-11-18T20:30:04,867 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 01f6d0ddf5a713fb68de04ee6d52b2a5 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T20:30:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/7688a979e4354e9f9c263b872b3ca965 is 1080, key is row0002/info:/1731961804708/Put/seqid=0 2024-11-18T20:30:04,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741838_1014 (size=6033) 2024-11-18T20:30:04,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741838_1014 (size=6033) 2024-11-18T20:30:04,879 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/7688a979e4354e9f9c263b872b3ca965 2024-11-18T20:30:04,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/7688a979e4354e9f9c263b872b3ca965 as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/7688a979e4354e9f9c263b872b3ca965 2024-11-18T20:30:04,895 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/7688a979e4354e9f9c263b872b3ca965, entries=1, sequenceid=9, filesize=5.9 K 2024-11-18T20:30:04,897 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 01f6d0ddf5a713fb68de04ee6d52b2a5 in 29ms, sequenceid=9, compaction requested=false 2024-11-18T20:30:04,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 01f6d0ddf5a713fb68de04ee6d52b2a5: 2024-11-18T20:30:04,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 
2024-11-18T20:30:04,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-18T20:30:04,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-18T20:30:04,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-18T20:30:04,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-18T20:30:04,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 193 msec 2024-11-18T20:30:04,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:04,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:05,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:05,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:06,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:06,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:07,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:07,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:08,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:08,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:09,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:09,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:10,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:10,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 after 68080ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T20:30:10,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:10,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta after 68066ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T20:30:11,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:11,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:12,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:12,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:13,554 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:30:13,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:13,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-18T20:30:14,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-18T20:30:14,818 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-18T20:30:14,826 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C34539%2C1731961783622.1731961814826
2024-11-18T20:30:14,832 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:14,833 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:14,833 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:14,833 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:14,833 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:14,833 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961784011 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961814826
2024-11-18T20:30:14,834 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40863:40863),(127.0.0.1/127.0.0.1:45345:45345)]
2024-11-18T20:30:14,834 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961784011 is not closed yet, will try archiving it next time
2024-11-18T20:30:14,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T20:30:14,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741833_1009 (size=5546)
2024-11-18T20:30:14,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741833_1009 (size=5546)
2024-11-18T20:30:14,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T20:30:14,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-18T20:30:14,839 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE,
hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T20:30:14,840 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T20:30:14,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T20:30:14,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:14,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:14,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-18T20:30:14,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 
2024-11-18T20:30:14,995 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 01f6d0ddf5a713fb68de04ee6d52b2a5 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-18T20:30:15,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/c4ff28c88a034ad7bc8cb5f51b3a348e is 1080, key is row0003/info:/1731961814821/Put/seqid=0
2024-11-18T20:30:15,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741840_1016 (size=6033)
2024-11-18T20:30:15,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741840_1016 (size=6033)
2024-11-18T20:30:15,009 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/c4ff28c88a034ad7bc8cb5f51b3a348e
2024-11-18T20:30:15,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/c4ff28c88a034ad7bc8cb5f51b3a348e as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/c4ff28c88a034ad7bc8cb5f51b3a348e
2024-11-18T20:30:15,023 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/c4ff28c88a034ad7bc8cb5f51b3a348e, entries=1, sequenceid=13, filesize=5.9 K
2024-11-18T20:30:15,024 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 01f6d0ddf5a713fb68de04ee6d52b2a5 in 30ms, sequenceid=13, compaction requested=true
2024-11-18T20:30:15,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 01f6d0ddf5a713fb68de04ee6d52b2a5:
2024-11-18T20:30:15,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.
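[Editor's note] The flush recorded above was requested through the HBase admin API, as the earlier "Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling" master entry shows: the master stores a FlushTableProcedure (pid=11) and fans out a FlushRegionProcedure (pid=12) to the region server, which writes the memstore to a new HFile. The following is a minimal, illustrative client-side sketch of issuing such a flush with the standard org.apache.hadoop.hbase.client API; it is not the test's own code and assumes a reachable cluster configured via an hbase-site.xml on the classpath.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch only: request a flush of the test table, producing the
// kind of FlushTableProcedure / FlushRegionProcedure activity seen in the log.
public class FlushTableExample {
  public static void main(String[] args) throws IOException {
    // Cluster connection settings are assumed to come from hbase-site.xml.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the master to flush all regions of the table; per the log above this
      // is executed as a FlushTableProcedure with per-region subprocedures.
      admin.flush(table);
    }
  }
}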
2024-11-18T20:30:15,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-18T20:30:15,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-18T20:30:15,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-18T20:30:15,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-11-18T20:30:15,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 194 msec 2024-11-18T20:30:15,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:15,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:16,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:16,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:17,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:17,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:18,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:18,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:19,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:19,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:20,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:20,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:21,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:21,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:22,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:22,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:23,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:23,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:30:24,451 INFO [master/c0a89b2656d4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-18T20:30:24,451 INFO [master/c0a89b2656d4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-18T20:30:24,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-18T20:30:24,908 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-18T20:30:24,909 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-18T20:30:24,913 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-18T20:30:24,913 DEBUG [Time-limited test {}] regionserver.HStore(1541): 01f6d0ddf5a713fb68de04ee6d52b2a5/info is initiating minor compaction (all files)
2024-11-18T20:30:24,913 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-18T20:30:24,914 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-18T20:30:24,914 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 01f6d0ddf5a713fb68de04ee6d52b2a5/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.
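[Editor's note] The records above show the compaction policy selecting three store files and starting a minor compaction on the region's info family. For orientation only, the following is a minimal client-side sketch of requesting a compaction on this table through the public Admin API; the table name is copied from the log, the class name and cluster configuration are assumptions, and the test itself drives compaction internally rather than through this call.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical helper, not part of the test under discussion.
public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    // Assumed: an hbase-site.xml on the classpath pointing at the cluster from the log.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // Asks the serving region server to compact all stores of the table; the request
      // is asynchronous, so selection and execution appear later in the server log.
      admin.compact(table);
    }
  }
}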
2024-11-18T20:30:24,914 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/e12fe8979e8045e896b8880e80ba9a15, hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/7688a979e4354e9f9c263b872b3ca965, hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/c4ff28c88a034ad7bc8cb5f51b3a348e] into tmpdir=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp, totalSize=17.7 K
2024-11-18T20:30:24,916 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e12fe8979e8045e896b8880e80ba9a15, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731961794619
2024-11-18T20:30:24,917 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7688a979e4354e9f9c263b872b3ca965, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731961804708
2024-11-18T20:30:24,918 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c4ff28c88a034ad7bc8cb5f51b3a348e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731961814821
2024-11-18T20:30:24,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:24,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:24,929 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 01f6d0ddf5a713fb68de04ee6d52b2a5#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-11-18T20:30:24,930 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/d4db5a202b0b4491b3ba21ed49de16db is 1080, key is row0001/info:/1731961794619/Put/seqid=0
2024-11-18T20:30:24,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741841_1017 (size=8296)
2024-11-18T20:30:24,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741841_1017 (size=8296)
2024-11-18T20:30:24,942 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/d4db5a202b0b4491b3ba21ed49de16db as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/d4db5a202b0b4491b3ba21ed49de16db
2024-11-18T20:30:24,949 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 01f6d0ddf5a713fb68de04ee6d52b2a5/info of 01f6d0ddf5a713fb68de04ee6d52b2a5 into d4db5a202b0b4491b3ba21ed49de16db(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-18T20:30:24,949 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 01f6d0ddf5a713fb68de04ee6d52b2a5:
2024-11-18T20:30:24,952 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C34539%2C1731961783622.1731961824952
2024-11-18T20:30:24,959 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:24,959 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:24,959 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:24,959 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:24,959 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T20:30:24,959 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961814826 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961824952
2024-11-18T20:30:24,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741839_1015 (size=2520)
2024-11-18T20:30:24,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741839_1015 (size=2520)
2024-11-18T20:30:24,965 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45345:45345),(127.0.0.1/127.0.0.1:40863:40863)]
2024-11-18T20:30:24,965 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961784011 to hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/oldWALs/c0a89b2656d4%2C34539%2C1731961783622.1731961784011
2024-11-18T20:30:24,966 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T20:30:24,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T20:30:24,968 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-18T20:30:24,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-18T20:30:24,970 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-18T20:30:24,970 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-18T20:30:25,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-18T20:30:25,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.
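[Editor's note] The master-side records just above show client "jenkins" requesting a flush of the test table, which becomes a FlushTableProcedure (pid=13) with a per-region FlushRegionProcedure (pid=14) dispatched to the region server. The sketch below is an illustrative way to issue the same kind of flush from client code through the public Admin API; the table name comes from the log, while the class name and connection setup are assumptions and this is not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical helper, not part of the test under discussion.
public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed cluster config on classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes all regions of the table; on the master this surfaces as a
      // FlushTableProcedure like pid=13 in the log above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}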
2024-11-18T20:30:25,123 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 01f6d0ddf5a713fb68de04ee6d52b2a5 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-18T20:30:25,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/a73479f9209d4f8e88e35163e3350ab4 is 1080, key is row0000/info:/1731961824950/Put/seqid=0
2024-11-18T20:30:25,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741843_1019 (size=6033)
2024-11-18T20:30:25,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741843_1019 (size=6033)
2024-11-18T20:30:25,133 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/a73479f9209d4f8e88e35163e3350ab4
2024-11-18T20:30:25,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/a73479f9209d4f8e88e35163e3350ab4 as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/a73479f9209d4f8e88e35163e3350ab4
2024-11-18T20:30:25,145 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/a73479f9209d4f8e88e35163e3350ab4, entries=1, sequenceid=18, filesize=5.9 K
2024-11-18T20:30:25,146 INFO [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 01f6d0ddf5a713fb68de04ee6d52b2a5 in 23ms, sequenceid=18, compaction requested=false
2024-11-18T20:30:25,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 01f6d0ddf5a713fb68de04ee6d52b2a5:
2024-11-18T20:30:25,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.
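[Editor's note] The flush above persisted a single cell (row0000 in the info family) into store file a73479f9209d4f8e88e35163e3350ab4. As a sanity check, such a cell can be read back through the client API after the memstore has been emptied; the sketch below is illustrative only, with the table, row, and family names taken from the log and the class name and connection setup assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper, not part of the test under discussion.
public class ReadBackAfterFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(
             TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))) {
      // The flushed HFile held one cell keyed row0000/info:, so a Get on that row
      // should still return it once the data lives only in the store file.
      Result r = table.get(new Get(Bytes.toBytes("row0000")).addFamily(Bytes.toBytes("info")));
      System.out.println("row0000 present after flush: " + !r.isEmpty());
    }
  }
}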
2024-11-18T20:30:25,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-18T20:30:25,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-18T20:30:25,150 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-18T20:30:25,150 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-18T20:30:25,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-18T20:30:25,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:25,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:26,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:26,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:27,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:27,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:28,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:28,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:29,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:29,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:30,325 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 01f6d0ddf5a713fb68de04ee6d52b2a5, had cached 0 bytes from a total of 14329 2024-11-18T20:30:30,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:30,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:31,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:31,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:32,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:32,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:33,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:33,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:34,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:34,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:34,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44371 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-18T20:30:34,986 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T20:30:34,989 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C34539%2C1731961783622.1731961834988 2024-11-18T20:30:34,994 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:34,994 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:34,994 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:34,995 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:34,995 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:34,995 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961824952 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961834988 2024-11-18T20:30:34,996 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45345:45345),(127.0.0.1/127.0.0.1:40863:40863)] 2024-11-18T20:30:34,996 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961824952 is not closed yet, will try archiving it next time 2024-11-18T20:30:34,996 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/WALs/c0a89b2656d4,34539,1731961783622/c0a89b2656d4%2C34539%2C1731961783622.1731961814826 to hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/oldWALs/c0a89b2656d4%2C34539%2C1731961783622.1731961814826 2024-11-18T20:30:34,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:30:34,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741842_1018 (size=2026) 2024-11-18T20:30:34,996 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T20:30:34,997 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:30:34,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:30:34,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741842_1018 (size=2026) 2024-11-18T20:30:34,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:30:34,997 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T20:30:34,997 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:30:34,997 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1197060721, stopped=false 2024-11-18T20:30:34,997 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c0a89b2656d4,44371,1731961783573 2024-11-18T20:30:34,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:30:34,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:34,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:30:34,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:34,998 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:30:34,999 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:30:34,999 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:30:34,999 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:30:34,999 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:30:34,999 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:30:34,999 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0a89b2656d4,34539,1731961783622' ***** 2024-11-18T20:30:34,999 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:30:34,999 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:30:34,999 INFO [RS:0;c0a89b2656d4:34539 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:30:34,999 INFO [RS:0;c0a89b2656d4:34539 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:30:34,999 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:30:35,000 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(3091): Received CLOSE for 01f6d0ddf5a713fb68de04ee6d52b2a5 2024-11-18T20:30:35,000 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(959): stopping server c0a89b2656d4,34539,1731961783622 2024-11-18T20:30:35,000 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:30:35,000 INFO [RS:0;c0a89b2656d4:34539 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c0a89b2656d4:34539. 
2024-11-18T20:30:35,000 DEBUG [RS:0;c0a89b2656d4:34539 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:30:35,000 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 01f6d0ddf5a713fb68de04ee6d52b2a5, disabling compactions & flushes 2024-11-18T20:30:35,000 DEBUG [RS:0;c0a89b2656d4:34539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:30:35,000 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 2024-11-18T20:30:35,000 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 2024-11-18T20:30:35,000 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:30:35,000 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:30:35,000 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. after waiting 0 ms 2024-11-18T20:30:35,000 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:30:35,000 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 
2024-11-18T20:30:35,000 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:30:35,000 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 01f6d0ddf5a713fb68de04ee6d52b2a5 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T20:30:35,000 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T20:30:35,000 DEBUG [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 01f6d0ddf5a713fb68de04ee6d52b2a5=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.} 2024-11-18T20:30:35,000 DEBUG [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(1351): Waiting on 01f6d0ddf5a713fb68de04ee6d52b2a5, 1588230740 2024-11-18T20:30:35,000 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:30:35,000 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:30:35,000 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:30:35,000 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:30:35,001 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:30:35,001 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-18T20:30:35,006 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/2c51a61d9d124f25ac95bf0a483d5063 is 1080, key is row0001/info:/1731961834987/Put/seqid=0 2024-11-18T20:30:35,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741845_1021 (size=6033) 2024-11-18T20:30:35,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741845_1021 (size=6033) 2024-11-18T20:30:35,014 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/2c51a61d9d124f25ac95bf0a483d5063 2024-11-18T20:30:35,019 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/.tmp/info/ac412a47ce4e41d297e668d8dab41ccb is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5./info:regioninfo/1731961785346/Put/seqid=0 2024-11-18T20:30:35,021 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/.tmp/info/2c51a61d9d124f25ac95bf0a483d5063 as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/2c51a61d9d124f25ac95bf0a483d5063 2024-11-18T20:30:35,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741846_1022 (size=7308) 2024-11-18T20:30:35,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741846_1022 (size=7308) 2024-11-18T20:30:35,027 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/.tmp/info/ac412a47ce4e41d297e668d8dab41ccb 2024-11-18T20:30:35,029 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/2c51a61d9d124f25ac95bf0a483d5063, entries=1, sequenceid=22, filesize=5.9 K 2024-11-18T20:30:35,030 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 01f6d0ddf5a713fb68de04ee6d52b2a5 in 30ms, sequenceid=22, compaction requested=true 2024-11-18T20:30:35,031 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/e12fe8979e8045e896b8880e80ba9a15, hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/7688a979e4354e9f9c263b872b3ca965, hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/c4ff28c88a034ad7bc8cb5f51b3a348e] to archive 2024-11-18T20:30:35,031 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T20:30:35,033 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/e12fe8979e8045e896b8880e80ba9a15 to hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/e12fe8979e8045e896b8880e80ba9a15 2024-11-18T20:30:35,035 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/7688a979e4354e9f9c263b872b3ca965 to hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/7688a979e4354e9f9c263b872b3ca965 2024-11-18T20:30:35,036 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/c4ff28c88a034ad7bc8cb5f51b3a348e to hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/info/c4ff28c88a034ad7bc8cb5f51b3a348e 2024-11-18T20:30:35,036 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c0a89b2656d4:44371 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T20:30:35,037 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e12fe8979e8045e896b8880e80ba9a15=6033, 7688a979e4354e9f9c263b872b3ca965=6033, c4ff28c88a034ad7bc8cb5f51b3a348e=6033] 2024-11-18T20:30:35,045 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/01f6d0ddf5a713fb68de04ee6d52b2a5/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-18T20:30:35,045 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 2024-11-18T20:30:35,045 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 01f6d0ddf5a713fb68de04ee6d52b2a5: Waiting for close lock at 1731961835000Running coprocessor pre-close hooks at 1731961835000Disabling compacts and flushes for region at 1731961835000Disabling writes for close at 1731961835000Obtaining lock to block concurrent updates at 1731961835000Preparing flush snapshotting stores in 01f6d0ddf5a713fb68de04ee6d52b2a5 at 1731961835000Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731961835001 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. at 1731961835001Flushing 01f6d0ddf5a713fb68de04ee6d52b2a5/info: creating writer at 1731961835001Flushing 01f6d0ddf5a713fb68de04ee6d52b2a5/info: appending metadata at 1731961835006 (+5 ms)Flushing 01f6d0ddf5a713fb68de04ee6d52b2a5/info: closing flushed file at 1731961835006Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56d4492e: reopening flushed file at 1731961835020 (+14 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 01f6d0ddf5a713fb68de04ee6d52b2a5 in 30ms, sequenceid=22, compaction requested=true at 1731961835030 (+10 ms)Writing region close event to WAL at 1731961835041 (+11 ms)Running coprocessor post-close hooks at 1731961835045 (+4 ms)Closed at 1731961835045 2024-11-18T20:30:35,045 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731961784558.01f6d0ddf5a713fb68de04ee6d52b2a5. 
2024-11-18T20:30:35,047 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/.tmp/ns/9a7d728660ab454f95fca24a2cecfcdf is 43, key is default/ns:d/1731961784433/Put/seqid=0 2024-11-18T20:30:35,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741847_1023 (size=5153) 2024-11-18T20:30:35,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741847_1023 (size=5153) 2024-11-18T20:30:35,052 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/.tmp/ns/9a7d728660ab454f95fca24a2cecfcdf 2024-11-18T20:30:35,069 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/.tmp/table/df317954c4f64875986115c121882210 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731961785357/Put/seqid=0 2024-11-18T20:30:35,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741848_1024 (size=5508) 2024-11-18T20:30:35,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741848_1024 (size=5508) 2024-11-18T20:30:35,074 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/.tmp/table/df317954c4f64875986115c121882210 2024-11-18T20:30:35,080 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/.tmp/info/ac412a47ce4e41d297e668d8dab41ccb as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/info/ac412a47ce4e41d297e668d8dab41ccb 2024-11-18T20:30:35,086 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/info/ac412a47ce4e41d297e668d8dab41ccb, entries=10, sequenceid=11, filesize=7.1 K 2024-11-18T20:30:35,087 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/.tmp/ns/9a7d728660ab454f95fca24a2cecfcdf as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/ns/9a7d728660ab454f95fca24a2cecfcdf 2024-11-18T20:30:35,092 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/ns/9a7d728660ab454f95fca24a2cecfcdf, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T20:30:35,092 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/.tmp/table/df317954c4f64875986115c121882210 as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/table/df317954c4f64875986115c121882210 2024-11-18T20:30:35,098 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/table/df317954c4f64875986115c121882210, entries=2, sequenceid=11, filesize=5.4 K 2024-11-18T20:30:35,099 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false 2024-11-18T20:30:35,103 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T20:30:35,103 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:30:35,104 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:30:35,104 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961835000Running coprocessor pre-close hooks at 1731961835000Disabling compacts and flushes for region at 1731961835000Disabling writes for close at 1731961835000Obtaining lock to block concurrent updates at 1731961835001 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731961835001Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731961835001Flushing stores of hbase:meta,,1.1588230740 at 1731961835001Flushing 1588230740/info: creating writer at 1731961835002 (+1 ms)Flushing 1588230740/info: appending metadata at 1731961835019 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731961835019Flushing 1588230740/ns: creating writer at 1731961835032 (+13 ms)Flushing 1588230740/ns: appending metadata at 1731961835046 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731961835046Flushing 1588230740/table: creating writer at 1731961835056 (+10 ms)Flushing 1588230740/table: appending metadata at 1731961835069 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731961835069Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59dfacaf: reopening flushed file at 1731961835079 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a69cd3c: reopening flushed file at 1731961835086 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b795a8f: reopening flushed file at 1731961835092 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false at 1731961835099 (+7 ms)Writing region close event to WAL at 1731961835100 (+1 ms)Running coprocessor post-close hooks at 1731961835103 (+3 ms)Closed at 1731961835104 (+1 ms) 2024-11-18T20:30:35,104 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:30:35,201 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(976): stopping server c0a89b2656d4,34539,1731961783622; all regions closed. 2024-11-18T20:30:35,201 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,201 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,201 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,202 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,202 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741834_1010 (size=3306) 2024-11-18T20:30:35,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741834_1010 (size=3306) 2024-11-18T20:30:35,205 DEBUG [RS:0;c0a89b2656d4:34539 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/oldWALs 2024-11-18T20:30:35,205 INFO [RS:0;c0a89b2656d4:34539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C34539%2C1731961783622.meta:.meta(num 1731961784393) 2024-11-18T20:30:35,206 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,206 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,206 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,206 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,206 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741844_1020 (size=1252) 2024-11-18T20:30:35,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741844_1020 (size=1252) 2024-11-18T20:30:35,210 DEBUG [RS:0;c0a89b2656d4:34539 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/oldWALs 2024-11-18T20:30:35,210 INFO [RS:0;c0a89b2656d4:34539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C34539%2C1731961783622:(num 1731961834988) 2024-11-18T20:30:35,210 DEBUG [RS:0;c0a89b2656d4:34539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:30:35,210 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:30:35,210 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:30:35,211 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.ChoreService(370): Chore service for: 
regionserver/c0a89b2656d4:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T20:30:35,211 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:30:35,211 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:30:35,211 INFO [RS:0;c0a89b2656d4:34539 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34539 2024-11-18T20:30:35,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:30:35,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0a89b2656d4,34539,1731961783622 2024-11-18T20:30:35,212 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:30:35,213 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0a89b2656d4,34539,1731961783622] 2024-11-18T20:30:35,214 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0a89b2656d4,34539,1731961783622 already deleted, retry=false 2024-11-18T20:30:35,214 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0a89b2656d4,34539,1731961783622 expired; onlineServers=0 2024-11-18T20:30:35,214 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c0a89b2656d4,44371,1731961783573' ***** 2024-11-18T20:30:35,214 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:30:35,214 INFO [M:0;c0a89b2656d4:44371 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:30:35,214 INFO [M:0;c0a89b2656d4:44371 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:30:35,214 DEBUG [M:0;c0a89b2656d4:44371 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:30:35,214 DEBUG [M:0;c0a89b2656d4:44371 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:30:35,214 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961783784 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961783784,5,FailOnTimeoutGroup] 2024-11-18T20:30:35,214 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:30:35,214 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961783785 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961783785,5,FailOnTimeoutGroup] 2024-11-18T20:30:35,214 INFO [M:0;c0a89b2656d4:44371 {}] hbase.ChoreService(370): Chore service for: master/c0a89b2656d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:30:35,214 INFO [M:0;c0a89b2656d4:44371 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:30:35,214 DEBUG [M:0;c0a89b2656d4:44371 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:30:35,214 INFO [M:0;c0a89b2656d4:44371 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:30:35,214 INFO [M:0;c0a89b2656d4:44371 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:30:35,215 INFO [M:0;c0a89b2656d4:44371 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:30:35,215 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:30:35,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:30:35,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:35,215 DEBUG [M:0;c0a89b2656d4:44371 {}] zookeeper.ZKUtil(347): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:30:35,215 WARN [M:0;c0a89b2656d4:44371 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:30:35,216 INFO [M:0;c0a89b2656d4:44371 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/.lastflushedseqids 2024-11-18T20:30:35,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741849_1025 (size=130) 2024-11-18T20:30:35,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741849_1025 (size=130) 2024-11-18T20:30:35,221 INFO [M:0;c0a89b2656d4:44371 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:30:35,221 INFO [M:0;c0a89b2656d4:44371 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:30:35,221 DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:30:35,221 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:30:35,221 DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:30:35,221 DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:30:35,221 DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:30:35,221 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.91 KB 2024-11-18T20:30:35,236 DEBUG [M:0;c0a89b2656d4:44371 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7cac5491d9674c82aea9f4a4bdfa7348 is 82, key is hbase:meta,,1/info:regioninfo/1731961784421/Put/seqid=0 2024-11-18T20:30:35,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741850_1026 (size=5672) 2024-11-18T20:30:35,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741850_1026 (size=5672) 2024-11-18T20:30:35,241 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7cac5491d9674c82aea9f4a4bdfa7348 2024-11-18T20:30:35,260 DEBUG [M:0;c0a89b2656d4:44371 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cda70be2da164ea093b42cd72bbfc567 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961785362/Put/seqid=0 2024-11-18T20:30:35,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741851_1027 (size=7818) 2024-11-18T20:30:35,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741851_1027 (size=7818) 2024-11-18T20:30:35,265 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cda70be2da164ea093b42cd72bbfc567 2024-11-18T20:30:35,270 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cda70be2da164ea093b42cd72bbfc567 2024-11-18T20:30:35,283 DEBUG [M:0;c0a89b2656d4:44371 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d3e7f559653a4798b016a976473a70e6 is 69, key is c0a89b2656d4,34539,1731961783622/rs:state/1731961783865/Put/seqid=0 2024-11-18T20:30:35,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43941 is added to blk_1073741852_1028 (size=5156) 2024-11-18T20:30:35,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741852_1028 (size=5156) 2024-11-18T20:30:35,288 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d3e7f559653a4798b016a976473a70e6 2024-11-18T20:30:35,305 DEBUG [M:0;c0a89b2656d4:44371 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/310efe2daee14239ba5f629d76c42b5e is 52, key is load_balancer_on/state:d/1731961784554/Put/seqid=0 2024-11-18T20:30:35,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741853_1029 (size=5056) 2024-11-18T20:30:35,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741853_1029 (size=5056) 2024-11-18T20:30:35,310 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/310efe2daee14239ba5f629d76c42b5e 2024-11-18T20:30:35,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:30:35,313 INFO [RS:0;c0a89b2656d4:34539 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:30:35,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1005486e1730001, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:30:35,313 INFO [RS:0;c0a89b2656d4:34539 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0a89b2656d4,34539,1731961783622; zookeeper connection closed. 
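Each column family above is flushed to a file under the store's .tmp directory and only later committed into the family directory (see the "Committing ... as ..." lines that follow). A minimal sketch of that write-then-rename pattern using the Hadoop FileSystem API; the paths and file name here are made up for the example and are not the real HRegionFileSystem layout:

// Sketch of the write-to-.tmp-then-rename commit pattern (illustrative paths only).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());

        Path tmp = new Path("/demo/store/.tmp/flushfile");
        Path committed = new Path("/demo/store/info/flushfile");

        // 1. Write the new file where readers never look.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.writeBytes("flushed cells would go here");
        }

        // 2. Publish it with a single rename, so readers only ever see complete files.
        fs.mkdirs(committed.getParent());
        if (!fs.rename(tmp, committed)) {
            throw new java.io.IOException("commit failed for " + committed);
        }
    }
}

With an empty Configuration this runs against the local filesystem; the point of the pattern is that readers never observe a half-written file, which is why the flush writes under .tmp first.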
2024-11-18T20:30:35,313 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@724b8b71 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@724b8b71 2024-11-18T20:30:35,314 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:30:35,315 DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7cac5491d9674c82aea9f4a4bdfa7348 as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7cac5491d9674c82aea9f4a4bdfa7348 2024-11-18T20:30:35,320 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7cac5491d9674c82aea9f4a4bdfa7348, entries=8, sequenceid=121, filesize=5.5 K 2024-11-18T20:30:35,321 DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cda70be2da164ea093b42cd72bbfc567 as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cda70be2da164ea093b42cd72bbfc567 2024-11-18T20:30:35,325 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cda70be2da164ea093b42cd72bbfc567 2024-11-18T20:30:35,326 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cda70be2da164ea093b42cd72bbfc567, entries=14, sequenceid=121, filesize=7.6 K 2024-11-18T20:30:35,327 DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d3e7f559653a4798b016a976473a70e6 as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d3e7f559653a4798b016a976473a70e6 2024-11-18T20:30:35,331 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d3e7f559653a4798b016a976473a70e6, entries=1, sequenceid=121, filesize=5.0 K 2024-11-18T20:30:35,332 DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/310efe2daee14239ba5f629d76c42b5e as hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/310efe2daee14239ba5f629d76c42b5e 2024-11-18T20:30:35,336 INFO [M:0;c0a89b2656d4:44371 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40257/user/jenkins/test-data/2f1a6e37-9e99-64e1-efb9-9c56bbaeb268/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/310efe2daee14239ba5f629d76c42b5e, entries=1, sequenceid=121, filesize=4.9 K 2024-11-18T20:30:35,337 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false 2024-11-18T20:30:35,338 INFO [M:0;c0a89b2656d4:44371 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:30:35,339 DEBUG [M:0;c0a89b2656d4:44371 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961835221Disabling compacts and flushes for region at 1731961835221Disabling writes for close at 1731961835221Obtaining lock to block concurrent updates at 1731961835221Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961835221Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44593, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1731961835222 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731961835222Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961835223 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961835236 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961835236Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961835245 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961835259 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961835259Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961835270 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961835282 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961835282Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961835292 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961835304 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961835304Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c73c291: reopening flushed file at 1731961835314 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b3b2d31: reopening flushed file at 1731961835320 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31a45a42: reopening flushed file at 1731961835326 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73aed32e: reopening flushed file at 1731961835331 (+5 ms)Finished flush of dataSize ~43.55 KB/44593, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false at 1731961835337 (+6 ms)Writing region close event to WAL at 1731961835338 (+1 ms)Closed at 1731961835338 2024-11-18T20:30:35,339 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,339 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,339 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,339 INFO [sync.3 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,339 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:30:35,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40163 is added to blk_1073741830_1006 (size=52990) 2024-11-18T20:30:35,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43941 is added to blk_1073741830_1006 (size=52990) 2024-11-18T20:30:35,341 INFO [M:0;c0a89b2656d4:44371 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:30:35,341 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:30:35,341 INFO [M:0;c0a89b2656d4:44371 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44371 2024-11-18T20:30:35,342 INFO [M:0;c0a89b2656d4:44371 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:30:35,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:30:35,443 INFO [M:0;c0a89b2656d4:44371 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:30:35,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44371-0x1005486e1730000, quorum=127.0.0.1:53321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:30:35,449 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2186ae4f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:30:35,450 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e1439bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:30:35,450 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:30:35,450 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31884fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:30:35,450 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@aa5bbf3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.log.dir/,STOPPED} 2024-11-18T20:30:35,453 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
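The "Region close journal" entry above is one long line of step names, absolute timestamps, and "(+N ms)" deltas run together. A purely illustrative Java sketch of that journal format (not HRegion's actual journaling code):

// Each step records a wall-clock timestamp; printing runs the entries together and
// appends "(+N ms)" whenever time advanced since the previous step, as in the log line above.
import java.util.ArrayList;
import java.util.List;

public class StepJournal {
    private record Step(String what, long atMillis) { }

    private final List<Step> steps = new ArrayList<>();

    public void mark(String what) {
        steps.add(new Step(what, System.currentTimeMillis()));
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        long prev = -1L;
        for (Step s : steps) {
            sb.append(s.what()).append(" at ").append(s.atMillis());
            long delta = s.atMillis() - prev;
            if (prev >= 0 && delta > 0) {
                sb.append(" (+").append(delta).append(" ms)");
            }
            prev = s.atMillis();
        }
        return sb.toString();
    }
}

The journal is emitted as a single unbroken string when the close finishes, which is why the entry above reads as one continuous line.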
2024-11-18T20:30:35,453 WARN [BP-1002095311-172.17.0.2-1731961783019 heartbeating to localhost/127.0.0.1:40257 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:30:35,453 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:30:35,453 WARN [BP-1002095311-172.17.0.2-1731961783019 heartbeating to localhost/127.0.0.1:40257 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1002095311-172.17.0.2-1731961783019 (Datanode Uuid a347b1fd-f7ac-4154-b483-208bc023db47) service to localhost/127.0.0.1:40257 2024-11-18T20:30:35,454 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/data/data3/current/BP-1002095311-172.17.0.2-1731961783019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:30:35,454 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/data/data4/current/BP-1002095311-172.17.0.2-1731961783019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:30:35,454 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:30:35,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3adf1c78{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:30:35,456 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@686f71a5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:30:35,456 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:30:35,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6fab2cd0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:30:35,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a649f7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.log.dir/,STOPPED} 2024-11-18T20:30:35,458 WARN [BP-1002095311-172.17.0.2-1731961783019 heartbeating to localhost/127.0.0.1:40257 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:30:35,458 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
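The "sleep interrupted" and "encountered interrupt and exit" warnings above and below are the expected shutdown path for background refresher threads: the interrupt is their stop signal, not a failure. A hypothetical sketch of that pattern (not the actual CachingGetSpaceUsed or BPServiceActor code):

// A periodic worker that treats InterruptedException during sleep as its stop signal.
public class RefreshWorker implements Runnable {
    private final long intervalMillis;

    public RefreshWorker(long intervalMillis) {
        this.intervalMillis = intervalMillis;
    }

    @Override
    public void run() {
        try {
            while (!Thread.currentThread().isInterrupted()) {
                refresh();
                Thread.sleep(intervalMillis); // a shutdown interrupt() lands here
            }
        } catch (InterruptedException e) {
            // Restore the flag and exit: this is the normal shutdown path, not an error.
            Thread.currentThread().interrupt();
            System.out.println("refresh worker interrupted, exiting");
        }
    }

    private void refresh() {
        // placeholder for the periodic work, e.g. re-reading disk usage
    }
}

A shutdown hook only has to call interrupt() and join() on such a thread; the WARN lines in the log appear to be this exit path being reported.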
2024-11-18T20:30:35,458 WARN [BP-1002095311-172.17.0.2-1731961783019 heartbeating to localhost/127.0.0.1:40257 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1002095311-172.17.0.2-1731961783019 (Datanode Uuid 605857fb-13de-46f2-b3a1-74611633ec95) service to localhost/127.0.0.1:40257 2024-11-18T20:30:35,458 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:30:35,458 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/data/data1/current/BP-1002095311-172.17.0.2-1731961783019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:30:35,458 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/cluster_bbaf5328-e69a-b23e-9987-8d46ea2ff4d6/data/data2/current/BP-1002095311-172.17.0.2-1731961783019 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:30:35,458 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:30:35,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@789c469a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:30:35,464 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2413c723{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:30:35,464 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:30:35,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4529c569{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:30:35,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a789efc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.log.dir/,STOPPED} 2024-11-18T20:30:35,470 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:30:35,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:30:35,494 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40257 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:40257 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:40257 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40257 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40257 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:40257 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/c0a89b2656d4:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40257 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40257 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:40257 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=22 (was 28), ProcessCount=11 (was 11), AvailableMemoryMB=2457 (was 2513) 2024-11-18T20:30:35,500 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=22, ProcessCount=11, AvailableMemoryMB=2457 2024-11-18T20:30:35,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:30:35,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.log.dir so I do NOT create it in target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c 2024-11-18T20:30:35,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1bbc4e32-50ca-be91-7f05-f47d215bdad4/hadoop.tmp.dir so I do NOT create it in target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c 2024-11-18T20:30:35,500 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866, deleteOnExit=true 2024-11-18T20:30:35,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:30:35,500 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/test.cache.data in system properties and HBase conf 2024-11-18T20:30:35,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:30:35,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:30:35,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:30:35,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:30:35,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:30:35,501 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T20:30:35,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:30:35,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:30:35,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:30:35,501 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:30:35,502 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:30:35,516 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:30:35,554 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:30:35,558 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:30:35,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:30:35,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:30:35,559 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:30:35,560 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:30:35,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59e63a8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:30:35,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f1e6498{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:30:35,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@643a16ca{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/java.io.tmpdir/jetty-localhost-42387-hadoop-hdfs-3_4_1-tests_jar-_-any-17963007002117109051/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:30:35,653 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ed3b4c3{HTTP/1.1, (http/1.1)}{localhost:42387} 2024-11-18T20:30:35,653 INFO [Time-limited test {}] server.Server(415): Started @234349ms 2024-11-18T20:30:35,665 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:30:35,723 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:30:35,725 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:30:35,728 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:30:35,728 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:30:35,729 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:30:35,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1cdb7164{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:30:35,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1634e03e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:30:35,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53e82728{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/java.io.tmpdir/jetty-localhost-39223-hadoop-hdfs-3_4_1-tests_jar-_-any-11915438067982657955/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:30:35,822 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7223bbba{HTTP/1.1, (http/1.1)}{localhost:39223} 2024-11-18T20:30:35,822 INFO [Time-limited test {}] server.Server(415): Started @234517ms 2024-11-18T20:30:35,823 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:30:35,847 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:30:35,850 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:30:35,850 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:30:35,850 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:30:35,850 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:30:35,851 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@695df454{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:30:35,851 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dadaa88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:30:35,877 WARN [Thread-1949 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/data/data1/current/BP-1431792127-172.17.0.2-1731961835520/current, will proceed with Du for space computation calculation, 2024-11-18T20:30:35,878 WARN [Thread-1950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/data/data2/current/BP-1431792127-172.17.0.2-1731961835520/current, will proceed with Du for space computation calculation, 2024-11-18T20:30:35,879 INFO [regionserver/c0a89b2656d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:30:35,896 WARN [Thread-1928 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:30:35,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d88e3efdb8a4d7f with lease ID 0x2c17e3df71c2fba5: Processing first storage report for DS-3ea68036-2a65-4717-bb8a-c17a31dd7bd3 from datanode DatanodeRegistration(127.0.0.1:34591, datanodeUuid=a2f5eb84-5012-4a39-8106-4e48d22110fa, infoPort=45623, infoSecurePort=0, ipcPort=45505, storageInfo=lv=-57;cid=testClusterID;nsid=1083010226;c=1731961835520) 2024-11-18T20:30:35,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d88e3efdb8a4d7f with lease ID 0x2c17e3df71c2fba5: from storage DS-3ea68036-2a65-4717-bb8a-c17a31dd7bd3 node DatanodeRegistration(127.0.0.1:34591, datanodeUuid=a2f5eb84-5012-4a39-8106-4e48d22110fa, infoPort=45623, infoSecurePort=0, ipcPort=45505, storageInfo=lv=-57;cid=testClusterID;nsid=1083010226;c=1731961835520), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:30:35,898 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d88e3efdb8a4d7f with lease ID 0x2c17e3df71c2fba5: Processing first storage report for DS-5c3b4221-5ffe-477b-854a-28b001d38d3e from datanode DatanodeRegistration(127.0.0.1:34591, datanodeUuid=a2f5eb84-5012-4a39-8106-4e48d22110fa, infoPort=45623, infoSecurePort=0, ipcPort=45505, storageInfo=lv=-57;cid=testClusterID;nsid=1083010226;c=1731961835520) 2024-11-18T20:30:35,898 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d88e3efdb8a4d7f with lease ID 0x2c17e3df71c2fba5: from storage DS-5c3b4221-5ffe-477b-854a-28b001d38d3e node DatanodeRegistration(127.0.0.1:34591, datanodeUuid=a2f5eb84-5012-4a39-8106-4e48d22110fa, infoPort=45623, infoSecurePort=0, ipcPort=45505, storageInfo=lv=-57;cid=testClusterID;nsid=1083010226;c=1731961835520), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:30:35,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:35,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:35,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4309be89{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/java.io.tmpdir/jetty-localhost-41321-hadoop-hdfs-3_4_1-tests_jar-_-any-15898340626581743716/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:30:35,950 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@762ca6ce{HTTP/1.1, (http/1.1)}{localhost:41321} 2024-11-18T20:30:35,950 INFO [Time-limited test {}] server.Server(415): Started @234645ms 2024-11-18T20:30:35,950 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:30:36,003 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/data/data3/current/BP-1431792127-172.17.0.2-1731961835520/current, will proceed with Du for space computation calculation, 2024-11-18T20:30:36,003 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/data/data4/current/BP-1431792127-172.17.0.2-1731961835520/current, will proceed with Du for space computation calculation, 2024-11-18T20:30:36,022 WARN [Thread-1964 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:30:36,024 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b74636d204ee5c4 with lease ID 0x2c17e3df71c2fba6: Processing first storage report for DS-275caf2f-864c-46ff-a94b-60c5fff15cae from datanode DatanodeRegistration(127.0.0.1:33451, datanodeUuid=cc59418d-b16f-4489-ba2b-917c54230618, infoPort=42695, infoSecurePort=0, ipcPort=40425, storageInfo=lv=-57;cid=testClusterID;nsid=1083010226;c=1731961835520) 2024-11-18T20:30:36,024 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b74636d204ee5c4 with lease ID 0x2c17e3df71c2fba6: from storage DS-275caf2f-864c-46ff-a94b-60c5fff15cae node DatanodeRegistration(127.0.0.1:33451, datanodeUuid=cc59418d-b16f-4489-ba2b-917c54230618, infoPort=42695, infoSecurePort=0, ipcPort=40425, storageInfo=lv=-57;cid=testClusterID;nsid=1083010226;c=1731961835520), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:30:36,024 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b74636d204ee5c4 with lease ID 0x2c17e3df71c2fba6: Processing first storage report for DS-a4b82a8d-f243-4d95-9e2c-be566e2628b5 from datanode DatanodeRegistration(127.0.0.1:33451, datanodeUuid=cc59418d-b16f-4489-ba2b-917c54230618, infoPort=42695, infoSecurePort=0, ipcPort=40425, storageInfo=lv=-57;cid=testClusterID;nsid=1083010226;c=1731961835520) 2024-11-18T20:30:36,025 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b74636d204ee5c4 with lease ID 0x2c17e3df71c2fba6: from storage DS-a4b82a8d-f243-4d95-9e2c-be566e2628b5 node DatanodeRegistration(127.0.0.1:33451, datanodeUuid=cc59418d-b16f-4489-ba2b-917c54230618, infoPort=42695, infoSecurePort=0, ipcPort=40425, storageInfo=lv=-57;cid=testClusterID;nsid=1083010226;c=1731961835520), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:30:36,072 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c 2024-11-18T20:30:36,077 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/zookeeper_0, clientPort=61530, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:30:36,079 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61530 2024-11-18T20:30:36,079 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:30:36,080 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:30:36,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:30:36,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:30:36,089 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b with version=8 2024-11-18T20:30:36,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/hbase-staging 2024-11-18T20:30:36,091 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:30:36,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:30:36,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:30:36,092 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:30:36,092 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:30:36,092 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:30:36,092 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:30:36,092 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:30:36,092 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45287 2024-11-18T20:30:36,094 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45287 connecting to ZooKeeper ensemble=127.0.0.1:61530 2024-11-18T20:30:36,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452870x0, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:30:36,098 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45287-0x1005487ae9a0000 connected 2024-11-18T20:30:36,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:30:36,106 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:30:36,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T20:30:36,107 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T20:30:36,110 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:30:36,112 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:30:36,114 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:30:36,114 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b, hbase.cluster.distributed=false 2024-11-18T20:30:36,115 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:30:36,117 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45287 2024-11-18T20:30:36,118 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45287 2024-11-18T20:30:36,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45287 2024-11-18T20:30:36,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45287 2024-11-18T20:30:36,122 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45287 2024-11-18T20:30:36,135 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:30:36,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:30:36,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:30:36,135 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:30:36,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:30:36,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:30:36,136 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:30:36,136 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:30:36,136 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41687 2024-11-18T20:30:36,138 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41687 connecting to ZooKeeper ensemble=127.0.0.1:61530 2024-11-18T20:30:36,138 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:30:36,139 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:30:36,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416870x0, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:30:36,142 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:416870x0, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:30:36,142 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41687-0x1005487ae9a0001 connected 2024-11-18T20:30:36,143 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:30:36,143 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:30:36,143 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:30:36,144 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:30:36,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41687 2024-11-18T20:30:36,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41687 2024-11-18T20:30:36,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41687 2024-11-18T20:30:36,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41687 2024-11-18T20:30:36,146 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41687 2024-11-18T20:30:36,156 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c0a89b2656d4:45287 
2024-11-18T20:30:36,156 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c0a89b2656d4,45287,1731961836091 2024-11-18T20:30:36,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:30:36,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:30:36,158 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c0a89b2656d4,45287,1731961836091 2024-11-18T20:30:36,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:30:36,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,159 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:30:36,159 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c0a89b2656d4,45287,1731961836091 from backup master directory 2024-11-18T20:30:36,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c0a89b2656d4,45287,1731961836091 2024-11-18T20:30:36,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:30:36,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:30:36,160 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T20:30:36,160 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c0a89b2656d4,45287,1731961836091 2024-11-18T20:30:36,163 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/hbase.id] with ID: 978495c5-20c3-4dc9-8866-9f5f1fa60807 2024-11-18T20:30:36,163 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/.tmp/hbase.id 2024-11-18T20:30:36,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:30:36,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:30:36,168 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/.tmp/hbase.id]:[hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/hbase.id] 2024-11-18T20:30:36,181 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:30:36,181 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:30:36,183 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-18T20:30:36,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:30:36,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:30:36,190 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:30:36,190 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:30:36,191 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:30:36,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:30:36,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:30:36,198 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store 2024-11-18T20:30:36,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:30:36,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:30:36,204 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:30:36,204 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:30:36,204 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:30:36,204 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:30:36,205 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:30:36,205 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:30:36,205 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T20:30:36,205 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961836204Disabling compacts and flushes for region at 1731961836204Disabling writes for close at 1731961836205 (+1 ms)Writing region close event to WAL at 1731961836205Closed at 1731961836205 2024-11-18T20:30:36,206 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/.initializing 2024-11-18T20:30:36,206 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/WALs/c0a89b2656d4,45287,1731961836091 2024-11-18T20:30:36,208 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C45287%2C1731961836091, suffix=, logDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/WALs/c0a89b2656d4,45287,1731961836091, archiveDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/oldWALs, maxLogs=10 2024-11-18T20:30:36,209 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C45287%2C1731961836091.1731961836209 2024-11-18T20:30:36,215 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/WALs/c0a89b2656d4,45287,1731961836091/c0a89b2656d4%2C45287%2C1731961836091.1731961836209 2024-11-18T20:30:36,221 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42695:42695),(127.0.0.1/127.0.0.1:45623:45623)] 2024-11-18T20:30:36,222 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:30:36,222 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:30:36,222 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,222 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,224 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,225 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:30:36,225 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:30:36,226 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,227 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:30:36,227 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:30:36,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,229 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:30:36,229 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,229 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:30:36,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:30:36,231 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,231 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:30:36,231 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,232 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,232 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,233 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,233 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,234 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:30:36,235 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:30:36,237 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:30:36,237 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=750490, jitterRate=-0.04570348560810089}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:30:36,238 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961836222Initializing all the Stores at 1731961836223 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961836223Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961836224 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961836224Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961836224Cleaning up temporary data from old regions at 1731961836233 (+9 ms)Region opened successfully at 1731961836238 (+5 ms) 2024-11-18T20:30:36,238 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:30:36,241 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e91415f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:30:36,242 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:30:36,242 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:30:36,242 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:30:36,242 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:30:36,243 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:30:36,243 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:30:36,243 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:30:36,247 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:30:36,248 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:30:36,248 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:30:36,249 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:30:36,249 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:30:36,250 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:30:36,250 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:30:36,251 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:30:36,251 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:30:36,252 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:30:36,253 DEBUG 
[master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:30:36,255 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:30:36,256 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:30:36,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:30:36,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:30:36,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,257 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c0a89b2656d4,45287,1731961836091, sessionid=0x1005487ae9a0000, setting cluster-up flag (Was=false) 2024-11-18T20:30:36,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,261 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:30:36,262 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,45287,1731961836091 2024-11-18T20:30:36,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,267 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:30:36,267 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,45287,1731961836091 2024-11-18T20:30:36,269 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:30:36,274 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:30:36,275 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:30:36,275 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:30:36,275 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c0a89b2656d4,45287,1731961836091 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:30:36,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:30:36,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:30:36,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:30:36,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:30:36,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c0a89b2656d4:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:30:36,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:30:36,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:30:36,277 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961866277 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:30:36,278 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,278 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:30:36,278 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:30:36,279 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:30:36,279 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:30:36,279 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961836279,5,FailOnTimeoutGroup] 2024-11-18T20:30:36,279 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,279 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:30:36,279 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961836279,5,FailOnTimeoutGroup] 2024-11-18T20:30:36,279 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,279 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:30:36,279 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,279 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T20:30:36,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:30:36,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:30:36,288 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:30:36,288 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b 2024-11-18T20:30:36,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:30:36,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:30:36,298 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:30:36,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:30:36,302 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:30:36,302 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:30:36,303 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:30:36,304 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:30:36,304 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:30:36,305 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:30:36,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:30:36,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:30:36,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:30:36,307 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:30:36,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:30:36,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:30:36,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740 2024-11-18T20:30:36,308 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740 2024-11-18T20:30:36,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:30:36,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:30:36,310 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
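The CompactionConfiguration lines above (minCompactSize 128 MB, files [3, 10), ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5) are driven by a handful of tunables. A hedged sketch of setting them programmatically; the property names are a best-effort mapping, not taken from the log, and should be checked against the HBase version in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed mapping of the values printed by CompactionConfiguration above.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period: 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    return conf;
  }
}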
2024-11-18T20:30:36,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:30:36,313 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:30:36,313 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755421, jitterRate=-0.0394325852394104}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:30:36,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961836298Initializing all the Stores at 1731961836299 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961836299Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961836301 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961836301Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961836301Cleaning up temporary data from old regions at 1731961836309 (+8 ms)Region opened successfully at 1731961836314 (+5 ms) 2024-11-18T20:30:36,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:30:36,314 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:30:36,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:30:36,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:30:36,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:30:36,314 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:30:36,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961836314Disabling compacts and flushes for region at 1731961836314Disabling writes for close at 1731961836314Writing 
region close event to WAL at 1731961836314Closed at 1731961836314 2024-11-18T20:30:36,316 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:30:36,316 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:30:36,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:30:36,317 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:30:36,318 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:30:36,348 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(746): ClusterId : 978495c5-20c3-4dc9-8866-9f5f1fa60807 2024-11-18T20:30:36,348 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:30:36,349 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:30:36,349 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:30:36,351 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:30:36,351 DEBUG [RS:0;c0a89b2656d4:41687 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e78f8b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:30:36,362 DEBUG [RS:0;c0a89b2656d4:41687 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c0a89b2656d4:41687 2024-11-18T20:30:36,362 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:30:36,362 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:30:36,362 DEBUG [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T20:30:36,363 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0a89b2656d4,45287,1731961836091 with port=41687, startcode=1731961836135 2024-11-18T20:30:36,363 DEBUG [RS:0;c0a89b2656d4:41687 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:30:36,365 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44003, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:30:36,365 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45287 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:36,365 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45287 {}] master.ServerManager(517): Registering regionserver=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:36,367 DEBUG [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b 2024-11-18T20:30:36,367 DEBUG [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46871 2024-11-18T20:30:36,367 DEBUG [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:30:36,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:30:36,369 DEBUG [RS:0;c0a89b2656d4:41687 {}] zookeeper.ZKUtil(111): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:36,369 WARN [RS:0;c0a89b2656d4:41687 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:30:36,369 INFO [RS:0;c0a89b2656d4:41687 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:30:36,369 DEBUG [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:36,369 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0a89b2656d4,41687,1731961836135] 2024-11-18T20:30:36,372 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:30:36,374 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:30:36,375 INFO [RS:0;c0a89b2656d4:41687 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:30:36,375 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
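The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines are emitted when a chore is handed to a ChoreService. A rough sketch of that pattern, assuming HBase's internal ScheduledChore/ChoreService classes; the chore name, period, and stopper below are made up:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Minimal Stoppable so the chore has a stopper to consult.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // A do-nothing chore that runs every 60s, analogous to the chores logged above.
    ScheduledChore demoChore = new ScheduledChore("DemoChore", stopper, 60_000) {
      @Override protected void chore() {
        // periodic work would go here
      }
    };

    ChoreService service = new ChoreService("demo");  // thread-name prefix
    service.scheduleChore(demoChore);                 // should produce an "... is enabled." line
    // ... later ...
    service.shutdown();
  }
}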
2024-11-18T20:30:36,375 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:30:36,376 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:30:36,376 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:30:36,376 DEBUG [RS:0;c0a89b2656d4:41687 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:30:36,377 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
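Each RS_* executor entry above describes a named pool with corePoolSize == maxPoolSize. The sketch below is not HBase's internal ExecutorService API; it is a plain java.util.concurrent equivalent that conveys the same shape, with the pool name and size taken from the log and the helper itself purely illustrative:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedPoolSketch {
  // Build a fixed-size pool with named daemon threads, similar in spirit to the
  // RS_* executors above (core size equals max size).
  public static ExecutorService namedPool(String name, int size) {
    AtomicInteger seq = new AtomicInteger();
    ThreadFactory tf = r -> {
      Thread t = new Thread(r, name + "-" + seq.incrementAndGet());
      t.setDaemon(true);
      return t;
    };
    return Executors.newFixedThreadPool(size, tf);
  }

  public static void main(String[] args) {
    ExecutorService openRegionPool = namedPool("RS_OPEN_REGION", 1);
    openRegionPool.submit(() -> System.out.println("open-region task"));
    openRegionPool.shutdown();
  }
}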
2024-11-18T20:30:36,377 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,377 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,377 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,377 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,377 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,41687,1731961836135-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:30:36,391 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:30:36,391 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,41687,1731961836135-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,391 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,391 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.Replication(171): c0a89b2656d4,41687,1731961836135 started 2024-11-18T20:30:36,405 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,405 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(1482): Serving as c0a89b2656d4,41687,1731961836135, RpcServer on c0a89b2656d4/172.17.0.2:41687, sessionid=0x1005487ae9a0001 2024-11-18T20:30:36,405 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:30:36,405 DEBUG [RS:0;c0a89b2656d4:41687 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:36,405 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,41687,1731961836135' 2024-11-18T20:30:36,405 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:30:36,406 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:30:36,406 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:30:36,406 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:30:36,406 DEBUG [RS:0;c0a89b2656d4:41687 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:36,406 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,41687,1731961836135' 2024-11-18T20:30:36,406 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:30:36,407 DEBUG 
[RS:0;c0a89b2656d4:41687 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:30:36,407 DEBUG [RS:0;c0a89b2656d4:41687 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:30:36,407 INFO [RS:0;c0a89b2656d4:41687 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:30:36,407 INFO [RS:0;c0a89b2656d4:41687 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:30:36,468 WARN [c0a89b2656d4:45287 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:30:36,510 INFO [RS:0;c0a89b2656d4:41687 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C41687%2C1731961836135, suffix=, logDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135, archiveDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/oldWALs, maxLogs=32 2024-11-18T20:30:36,510 INFO [RS:0;c0a89b2656d4:41687 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C41687%2C1731961836135.1731961836510 2024-11-18T20:30:36,523 INFO [RS:0;c0a89b2656d4:41687 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135/c0a89b2656d4%2C41687%2C1731961836135.1731961836510 2024-11-18T20:30:36,525 DEBUG [RS:0;c0a89b2656d4:41687 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42695:42695),(127.0.0.1/127.0.0.1:45623:45623)] 2024-11-18T20:30:36,719 DEBUG [c0a89b2656d4:45287 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:30:36,720 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:36,723 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,41687,1731961836135, state=OPENING 2024-11-18T20:30:36,725 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:30:36,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:30:36,728 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:30:36,728 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:30:36,728 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:30:36,729 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,41687,1731961836135}] 2024-11-18T20:30:36,883 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:30:36,888 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60145, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:30:36,893 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:30:36,894 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:30:36,896 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C41687%2C1731961836135.meta, suffix=.meta, logDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135, archiveDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/oldWALs, maxLogs=32 2024-11-18T20:30:36,897 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C41687%2C1731961836135.meta.1731961836897.meta 2024-11-18T20:30:36,902 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135/c0a89b2656d4%2C41687%2C1731961836135.meta.1731961836897.meta 2024-11-18T20:30:36,903 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45623:45623),(127.0.0.1/127.0.0.1:42695:42695)] 2024-11-18T20:30:36,904 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:30:36,905 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:30:36,905 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:30:36,905 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
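Both WAL instances above report "blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32". A sketch of the configuration keys assumed to drive those numbers (rollsize being blocksize times the roll multiplier); the key names are a best-effort mapping, so verify them against the running HBase version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed properties behind the logged WAL configuration.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize = 256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * multiplier
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs
    return conf;
  }
}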
2024-11-18T20:30:36,905 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:30:36,905 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:30:36,905 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:30:36,905 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:30:36,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:30:36,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:30:36,908 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:30:36,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:30:36,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:30:36,909 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:30:36,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:30:36,910 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:30:36,910 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:30:36,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:30:36,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:30:36,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T20:30:36,912 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:30:36,913 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740 2024-11-18T20:30:36,913 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740 2024-11-18T20:30:36,914 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:30:36,914 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:30:36,915 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:30:36,916 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:30:36,916 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710979, jitterRate=-0.09594351053237915}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:30:36,916 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:30:36,917 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961836905Writing region info on filesystem at 1731961836905Initializing all the Stores at 1731961836906 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961836906Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961836906Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961836906Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961836906Cleaning up temporary data from old regions at 1731961836914 (+8 ms)Running coprocessor post-open hooks at 1731961836916 (+2 ms)Region opened successfully at 1731961836917 (+1 ms) 2024-11-18T20:30:36,918 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961836882 2024-11-18T20:30:36,919 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:30:36,920 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:30:36,920 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:36,921 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,41687,1731961836135, state=OPEN 2024-11-18T20:30:36,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:30:36,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:30:36,923 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:36,923 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:30:36,923 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:30:36,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:30:36,927 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,41687,1731961836135 in 195 msec 2024-11-18T20:30:36,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:30:36,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-11-18T20:30:36,930 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:30:36,930 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:30:36,931 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:30:36,931 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,41687,1731961836135, seqNum=-1] 2024-11-18T20:30:36,932 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:30:36,933 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48505, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:30:36,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 663 msec 2024-11-18T20:30:36,938 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961836938, completionTime=-1 2024-11-18T20:30:36,938 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:30:36,938 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961896940 2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731961956940 2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,45287,1731961836091-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,45287,1731961836091-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,45287,1731961836091-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c0a89b2656d4:45287, period=300000, unit=MILLISECONDS is enabled. 
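InitMetaProcedure logs that it is about to create the 'default' and 'hbase' namespaces. Additional namespaces can be created through the client Admin API; a minimal sketch, where the namespace name demo_ns and the default connection configuration are made up:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    // Create a namespace via the Admin API, analogous to the 'default' and
    // 'hbase' namespaces set up by InitMetaProcedure.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
    }
  }
}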
2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-18T20:30:36,940 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-18T20:30:36,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:30:36,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:30:36,943 DEBUG [master/c0a89b2656d4:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-18T20:30:36,944 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.784sec
2024-11-18T20:30:36,944 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-18T20:30:36,944 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-18T20:30:36,944 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-18T20:30:36,944 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-18T20:30:36,945 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-18T20:30:36,945 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,45287,1731961836091-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-18T20:30:36,945 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,45287,1731961836091-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
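The two WARN stack traces above show RecoverLeaseFSUtils probing isFileClosed reflectively and failing because the underlying DFSClient was already closed ("Filesystem closed"); the WAL paths point at a different NameNode port (39387), so they apparently belong to an earlier test instance being torn down. For orientation, the plain HDFS lease-recovery pattern those frames wrap looks roughly like the sketch below; the path handling and back-off are illustrative, not the HBase utility itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Trigger lease recovery on a WAL file and poll until the NameNode reports it
  // closed. This is the general HDFS pattern, not HBase's RecoverLeaseFSUtils.
  public static void recover(Configuration conf, Path wal) throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    boolean recovered = dfs.recoverLease(wal);
    while (!recovered && !dfs.isFileClosed(wal)) {
      Thread.sleep(1000); // back off before re-checking
      recovered = dfs.recoverLease(wal);
    }
  }
}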
2024-11-18T20:30:36,947 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:30:36,947 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:30:36,947 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,45287,1731961836091-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:30:36,948 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78a6273d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:30:36,948 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c0a89b2656d4,45287,-1 for getting cluster id 2024-11-18T20:30:36,948 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:30:36,949 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '978495c5-20c3-4dc9-8866-9f5f1fa60807' 2024-11-18T20:30:36,949 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:30:36,949 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "978495c5-20c3-4dc9-8866-9f5f1fa60807" 2024-11-18T20:30:36,950 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e5c5329, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:30:36,950 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c0a89b2656d4,45287,-1] 2024-11-18T20:30:36,950 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:30:36,950 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:30:36,951 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56816, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:30:36,952 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3af484fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:30:36,952 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:30:36,953 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,41687,1731961836135, seqNum=-1] 2024-11-18T20:30:36,953 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, 
sasl=false 2024-11-18T20:30:36,954 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55320, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:30:36,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c0a89b2656d4,45287,1731961836091 2024-11-18T20:30:36,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:30:36,958 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:30:36,958 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T20:30:36,959 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is c0a89b2656d4,45287,1731961836091 2024-11-18T20:30:36,959 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@79a93648 2024-11-18T20:30:36,959 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T20:30:36,960 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56820, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T20:30:36,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45287 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T20:30:36,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45287 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T20:30:36,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45287 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:30:36,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45287 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-18T20:30:36,963 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T20:30:36,963 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:36,963 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45287 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-18T20:30:36,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45287 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:30:36,964 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T20:30:36,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741835_1011 (size=381) 2024-11-18T20:30:36,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741835_1011 (size=381) 2024-11-18T20:30:36,972 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => df10b4746fa897739653c5eccd447f49, NAME => 'TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b 2024-11-18T20:30:36,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741836_1012 (size=64) 2024-11-18T20:30:36,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741836_1012 (size=64) 2024-11-18T20:30:36,978 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:30:36,978 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing df10b4746fa897739653c5eccd447f49, disabling compactions & flushes 2024-11-18T20:30:36,978 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:36,978 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:36,978 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. after waiting 0 ms 2024-11-18T20:30:36,978 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:36,978 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:36,978 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for df10b4746fa897739653c5eccd447f49: Waiting for close lock at 1731961836978Disabling compacts and flushes for region at 1731961836978Disabling writes for close at 1731961836978Writing region close event to WAL at 1731961836978Closed at 1731961836978 2024-11-18T20:30:36,980 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T20:30:36,980 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731961836980"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961836980"}]},"ts":"1731961836980"} 2024-11-18T20:30:36,982 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T20:30:36,983 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T20:30:36,983 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961836983"}]},"ts":"1731961836983"} 2024-11-18T20:30:36,985 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-18T20:30:36,985 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df10b4746fa897739653c5eccd447f49, ASSIGN}] 2024-11-18T20:30:36,986 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df10b4746fa897739653c5eccd447f49, ASSIGN 2024-11-18T20:30:36,987 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df10b4746fa897739653c5eccd447f49, ASSIGN; state=OFFLINE, location=c0a89b2656d4,41687,1731961836135; forceNewPlan=false, retain=false 2024-11-18T20:30:37,138 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=df10b4746fa897739653c5eccd447f49, regionState=OPENING, regionLocation=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:37,141 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df10b4746fa897739653c5eccd447f49, ASSIGN because future has completed 2024-11-18T20:30:37,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure df10b4746fa897739653c5eccd447f49, server=c0a89b2656d4,41687,1731961836135}] 2024-11-18T20:30:37,297 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 
2024-11-18T20:30:37,297 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => df10b4746fa897739653c5eccd447f49, NAME => 'TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:30:37,298 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,298 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:30:37,298 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,298 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,299 INFO [StoreOpener-df10b4746fa897739653c5eccd447f49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,300 INFO [StoreOpener-df10b4746fa897739653c5eccd447f49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region df10b4746fa897739653c5eccd447f49 columnFamilyName info 2024-11-18T20:30:37,300 DEBUG [StoreOpener-df10b4746fa897739653c5eccd447f49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:37,301 INFO [StoreOpener-df10b4746fa897739653c5eccd447f49-1 {}] regionserver.HStore(327): Store=df10b4746fa897739653c5eccd447f49/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:30:37,301 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,302 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,302 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,302 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,302 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,304 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,306 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:30:37,306 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened df10b4746fa897739653c5eccd447f49; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=810189, jitterRate=0.030209332704544067}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:30:37,306 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:37,307 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for df10b4746fa897739653c5eccd447f49: Running coprocessor pre-open hook at 1731961837298Writing region info on filesystem at 1731961837298Initializing all the Stores at 1731961837299 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961837299Cleaning up temporary data from old regions at 1731961837302 (+3 ms)Running coprocessor post-open hooks at 1731961837306 (+4 ms)Region opened successfully at 1731961837307 (+1 ms) 2024-11-18T20:30:37,308 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., pid=6, masterSystemTime=1731961837294 2024-11-18T20:30:37,310 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 
2024-11-18T20:30:37,310 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:37,310 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=df10b4746fa897739653c5eccd447f49, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:37,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure df10b4746fa897739653c5eccd447f49, server=c0a89b2656d4,41687,1731961836135 because future has completed 2024-11-18T20:30:37,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T20:30:37,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure df10b4746fa897739653c5eccd447f49, server=c0a89b2656d4,41687,1731961836135 in 172 msec 2024-11-18T20:30:37,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T20:30:37,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df10b4746fa897739653c5eccd447f49, ASSIGN in 330 msec 2024-11-18T20:30:37,318 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T20:30:37,318 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731961837318"}]},"ts":"1731961837318"} 2024-11-18T20:30:37,320 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-18T20:30:37,322 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T20:30:37,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 361 msec 2024-11-18T20:30:37,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:37,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:38,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:38,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:39,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:39,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:40,046 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,595 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:30:40,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:40,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:40,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:41,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:41,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:42,373 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T20:30:42,375 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-18T20:30:42,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:42,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:43,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:43,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:44,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:44,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:45,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:45,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:46,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T20:30:46,106 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T20:30:46,107 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:30:46,107 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T20:30:46,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T20:30:46,108 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T20:30:46,109 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-18T20:30:46,109 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T20:30:46,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:46,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:47,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45287 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T20:30:47,068 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-18T20:30:47,068 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-18T20:30:47,076 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-18T20:30:47,076 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:47,079 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., hostname=c0a89b2656d4,41687,1731961836135, seqNum=2] 2024-11-18T20:30:47,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:47,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df10b4746fa897739653c5eccd447f49 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:30:47,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/f70bc741b1374f4f99cfba62f6f4a3c1 is 1080, key is row0001/info:/1731961847080/Put/seqid=0 2024-11-18T20:30:47,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741837_1013 (size=12509) 2024-11-18T20:30:47,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741837_1013 (size=12509) 2024-11-18T20:30:47,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/f70bc741b1374f4f99cfba62f6f4a3c1 2024-11-18T20:30:47,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/f70bc741b1374f4f99cfba62f6f4a3c1 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f70bc741b1374f4f99cfba62f6f4a3c1 2024-11-18T20:30:47,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f70bc741b1374f4f99cfba62f6f4a3c1, entries=7, sequenceid=11, filesize=12.2 K 
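The flush recorded just above is the ordinary write path: Put operations on row0001..row0007 accumulate in the memstore of the info family, a ~12.2 K HFile is written under the region's .tmp directory, and the file is then committed into info/. In this test the region server flushes on its own because the configured memstore flush size is tiny; from a client, roughly the same sequence can be forced with the standard HBase API. A minimal sketch, assuming a reachable cluster configuration and the table/family names shown in the log (the qualifier "q" and the 1 KB value are illustrative placeholders, not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();       // reads hbase-site.xml from the classpath
        TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          byte[] family = Bytes.toBytes("info");
          byte[] value = new byte[1024];                         // ~1 KB per cell, like the 1080-byte cells logged
          for (int i = 1; i <= 7; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));  // row0001 .. row0007
            put.addColumn(family, Bytes.toBytes("q"), value);    // hypothetical qualifier, not from the log
            table.put(put);                                      // accumulates in the region's memstore
          }
          admin.flush(tn);                                       // explicit flush request; the server writes the HFile
        }
      }
    }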
2024-11-18T20:30:47,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for df10b4746fa897739653c5eccd447f49 in 38ms, sequenceid=11, compaction requested=false 2024-11-18T20:30:47,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:47,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:47,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df10b4746fa897739653c5eccd447f49 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-18T20:30:47,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/5dc0c428d3ea40c6941bed5f13873e62 is 1080, key is row0008/info:/1731961847095/Put/seqid=0 2024-11-18T20:30:47,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741838_1014 (size=24376) 2024-11-18T20:30:47,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741838_1014 (size=24376) 2024-11-18T20:30:47,144 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/5dc0c428d3ea40c6941bed5f13873e62 2024-11-18T20:30:47,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/5dc0c428d3ea40c6941bed5f13873e62 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/5dc0c428d3ea40c6941bed5f13873e62 2024-11-18T20:30:47,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/5dc0c428d3ea40c6941bed5f13873e62, entries=18, sequenceid=32, filesize=23.8 K 2024-11-18T20:30:47,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=7.36 KB/7532 for df10b4746fa897739653c5eccd447f49 in 25ms, sequenceid=32, compaction requested=false 2024-11-18T20:30:47,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:47,159 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.0 K, sizeToCheck=16.0 K 2024-11-18T20:30:47,159 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:47,159 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/5dc0c428d3ea40c6941bed5f13873e62 because midkey is the same as first or last row 2024-11-18T20:30:47,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:47,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:48,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:48,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:49,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:49,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df10b4746fa897739653c5eccd447f49 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-18T20:30:49,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/864c5c1058f7419f8e96fde24ec2ea63 is 1080, key is row0026/info:/1731961847134/Put/seqid=0 2024-11-18T20:30:49,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741839_1015 (size=13586) 2024-11-18T20:30:49,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741839_1015 (size=13586) 2024-11-18T20:30:49,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/864c5c1058f7419f8e96fde24ec2ea63 2024-11-18T20:30:49,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/864c5c1058f7419f8e96fde24ec2ea63 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/864c5c1058f7419f8e96fde24ec2ea63 2024-11-18T20:30:49,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/864c5c1058f7419f8e96fde24ec2ea63, entries=8, sequenceid=43, filesize=13.3 K 2024-11-18T20:30:49,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for df10b4746fa897739653c5eccd447f49 in 25ms, sequenceid=43, compaction requested=true 2024-11-18T20:30:49,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:49,181 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-18T20:30:49,181 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:49,181 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/5dc0c428d3ea40c6941bed5f13873e62 because midkey is the same as first or last row 2024-11-18T20:30:49,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store df10b4746fa897739653c5eccd447f49:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-18T20:30:49,181 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:49,181 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:30:49,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:49,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df10b4746fa897739653c5eccd447f49 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T20:30:49,183 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:30:49,183 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): df10b4746fa897739653c5eccd447f49/info is initiating minor compaction (all files) 2024-11-18T20:30:49,183 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of df10b4746fa897739653c5eccd447f49/info in TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:49,183 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f70bc741b1374f4f99cfba62f6f4a3c1, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/5dc0c428d3ea40c6941bed5f13873e62, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/864c5c1058f7419f8e96fde24ec2ea63] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp, totalSize=49.3 K 2024-11-18T20:30:49,183 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting f70bc741b1374f4f99cfba62f6f4a3c1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731961847080 2024-11-18T20:30:49,184 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5dc0c428d3ea40c6941bed5f13873e62, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1731961847095 2024-11-18T20:30:49,184 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 864c5c1058f7419f8e96fde24ec2ea63, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731961847134 2024-11-18T20:30:49,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/77ad113f4284423fa9e2d5b16b24427f is 1080, key is row0034/info:/1731961849157/Put/seqid=0 
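The compaction selection above is plain arithmetic over the store files already listed: the three flushed HFiles reported by addStoredBlock are 12509, 24376 and 13586 bytes, which is exactly the "3 files of size 50471" that the exploring compaction policy prints, i.e. the 49.3 K that the split check also sees. A small stand-alone sketch of that bookkeeping (it mirrors the numbers in the log; it is not HBase's actual policy code):

    public class CompactionSelectionMath {
      public static void main(String[] args) {
        // sizes of f70bc741..., 5dc0c428... and 864c5c10... as reported by addStoredBlock above
        long[] storeFileBytes = {12509L, 24376L, 13586L};
        long total = 0;
        for (long b : storeFileBytes) {
          total += b;                                   // 50471 bytes selected for compaction
        }
        double totalKb = total / 1024.0;                // ~49.3 K, matching totalSize=49.3 K
        long sizeToCheck = 16 * 1024;                   // the 16.0 K threshold the split policy logs
        boolean bigEnough = total > sizeToCheck;        // true -> "Should split because region size is big enough"
        System.out.printf("selected=%d bytes (%.1f K), bigEnough=%b%n", total, totalKb, bigEnough);
      }
    }

Even though the size check passes, the StoreUtils lines show the split is still refused, because the midkey of the largest file equals its first or last row.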
2024-11-18T20:30:49,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741840_1016 (size=16817) 2024-11-18T20:30:49,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741840_1016 (size=16817) 2024-11-18T20:30:49,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/77ad113f4284423fa9e2d5b16b24427f 2024-11-18T20:30:49,198 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): df10b4746fa897739653c5eccd447f49#info#compaction#58 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:30:49,198 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/e49aab2508e04b34a9518a4900c2b453 is 1080, key is row0001/info:/1731961847080/Put/seqid=0 2024-11-18T20:30:49,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/77ad113f4284423fa9e2d5b16b24427f as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/77ad113f4284423fa9e2d5b16b24427f 2024-11-18T20:30:49,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/77ad113f4284423fa9e2d5b16b24427f, entries=11, sequenceid=57, filesize=16.4 K 2024-11-18T20:30:49,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for df10b4746fa897739653c5eccd447f49 in 26ms, sequenceid=57, compaction requested=false 2024-11-18T20:30:49,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:49,208 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.7 K, sizeToCheck=16.0 K 2024-11-18T20:30:49,208 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:49,209 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/5dc0c428d3ea40c6941bed5f13873e62 because midkey is the same as first or last row 2024-11-18T20:30:49,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:49,210 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df10b4746fa897739653c5eccd447f49 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:30:49,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/ae9682ea00094811bf86b8807fa95d61 is 1080, key is row0045/info:/1731961849183/Put/seqid=0 2024-11-18T20:30:49,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741841_1017 (size=40670) 2024-11-18T20:30:49,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741841_1017 (size=40670) 2024-11-18T20:30:49,226 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/e49aab2508e04b34a9518a4900c2b453 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e49aab2508e04b34a9518a4900c2b453 2024-11-18T20:30:49,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741842_1018 (size=17894) 2024-11-18T20:30:49,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741842_1018 (size=17894) 2024-11-18T20:30:49,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/ae9682ea00094811bf86b8807fa95d61 2024-11-18T20:30:49,234 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in df10b4746fa897739653c5eccd447f49/info of df10b4746fa897739653c5eccd447f49 into e49aab2508e04b34a9518a4900c2b453(size=39.7 K), total size for store is 56.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
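The "Committing .tmp/info/e49aab2508e04b34a9518a4900c2b453 as info/e49aab2508e04b34a9518a4900c2b453" step above amounts, at the filesystem level, to renaming the finished file from the region's .tmp directory into the column-family directory, so readers only ever see complete HFiles. A minimal sketch of that pattern against the plain HDFS API, using the NameNode URI and region path from the log (illustration only, not HBase's HRegionFileSystem code):

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitStoreFileSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46871"), new Configuration());
        Path region = new Path("/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/"
            + "data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49");
        Path tmp = new Path(region, ".tmp/info/e49aab2508e04b34a9518a4900c2b453");
        Path committed = new Path(region, "info/e49aab2508e04b34a9518a4900c2b453");
        // HDFS rename is atomic, so the store never exposes a partially written file
        if (!fs.rename(tmp, committed)) {
          throw new IOException("failed to commit " + tmp + " to " + committed);
        }
        System.out.println("committed " + committed);
      }
    }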
2024-11-18T20:30:49,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:49,234 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., storeName=df10b4746fa897739653c5eccd447f49/info, priority=13, startTime=1731961849181; duration=0sec 2024-11-18T20:30:49,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-18T20:30:49,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:49,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e49aab2508e04b34a9518a4900c2b453 because midkey is the same as first or last row 2024-11-18T20:30:49,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-18T20:30:49,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:49,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e49aab2508e04b34a9518a4900c2b453 because midkey is the same as first or last row 2024-11-18T20:30:49,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-18T20:30:49,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:49,235 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e49aab2508e04b34a9518a4900c2b453 because midkey is the same as first or last row 2024-11-18T20:30:49,235 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:49,235 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: df10b4746fa897739653c5eccd447f49:info 2024-11-18T20:30:49,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/ae9682ea00094811bf86b8807fa95d61 as 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/ae9682ea00094811bf86b8807fa95d61 2024-11-18T20:30:49,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/ae9682ea00094811bf86b8807fa95d61, entries=12, sequenceid=72, filesize=17.5 K 2024-11-18T20:30:49,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for df10b4746fa897739653c5eccd447f49 in 33ms, sequenceid=72, compaction requested=true 2024-11-18T20:30:49,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:49,244 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-11-18T20:30:49,244 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:49,244 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e49aab2508e04b34a9518a4900c2b453 because midkey is the same as first or last row 2024-11-18T20:30:49,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store df10b4746fa897739653c5eccd447f49:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:30:49,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:49,244 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:30:49,245 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 75381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:30:49,245 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): df10b4746fa897739653c5eccd447f49/info is initiating minor compaction (all files) 2024-11-18T20:30:49,245 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of df10b4746fa897739653c5eccd447f49/info in TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 
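The recurring Close-WAL-Writer-0 warnings in this section come from RecoverLeaseFSUtils polling whether an old WAL file has been closed; the reflective call fails with "Filesystem closed" because the DFSClient behind that path has already been shut down. The probe it wraps is the public DistributedFileSystem API; a minimal sketch of the same check, assuming a live client for the WAL's filesystem (illustration only, not HBase's retry loop, which backs off and retries roughly once per second as the timestamps show):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseProbeSketch {
      public static void main(String[] args) throws IOException {
        // args[0]: a WAL path such as the ones named in the warnings above
        Path wal = new Path(args[0]);
        DistributedFileSystem dfs =
            (DistributedFileSystem) wal.getFileSystem(new Configuration());
        boolean recovered = dfs.recoverLease(wal);  // ask the NameNode to revoke the previous writer's lease
        boolean closed = dfs.isFileClosed(wal);     // the probe the stack traces above reach via reflection;
                                                    // throws "Filesystem closed" if this DFS client was closed
        System.out.println("recovered=" + recovered + ", closed=" + closed);
      }
    }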
2024-11-18T20:30:49,245 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e49aab2508e04b34a9518a4900c2b453, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/77ad113f4284423fa9e2d5b16b24427f, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/ae9682ea00094811bf86b8807fa95d61] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp, totalSize=73.6 K 2024-11-18T20:30:49,246 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting e49aab2508e04b34a9518a4900c2b453, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731961847080 2024-11-18T20:30:49,246 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 77ad113f4284423fa9e2d5b16b24427f, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1731961849157 2024-11-18T20:30:49,247 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting ae9682ea00094811bf86b8807fa95d61, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=72, earliestPutTs=1731961849183 2024-11-18T20:30:49,259 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): df10b4746fa897739653c5eccd447f49#info#compaction#60 average throughput is 28.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:30:49,260 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/f6b3adf66e88473a9cc92da989f35267 is 1080, key is row0001/info:/1731961847080/Put/seqid=0 2024-11-18T20:30:49,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741843_1019 (size=65612) 2024-11-18T20:30:49,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741843_1019 (size=65612) 2024-11-18T20:30:49,271 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/f6b3adf66e88473a9cc92da989f35267 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267 2024-11-18T20:30:49,277 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in df10b4746fa897739653c5eccd447f49/info of df10b4746fa897739653c5eccd447f49 into f6b3adf66e88473a9cc92da989f35267(size=64.1 K), total size for store is 64.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:30:49,277 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:49,277 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., storeName=df10b4746fa897739653c5eccd447f49/info, priority=13, startTime=1731961849244; duration=0sec 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.1 K, sizeToCheck=16.0 K 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267 because midkey is the same as first or last row 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.1 K, sizeToCheck=16.0 K 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267 because midkey is the same as first or last row 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=64.1 K, sizeToCheck=16.0 K 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267 because midkey is the same as first or last row 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:49,278 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: df10b4746fa897739653c5eccd447f49:info 2024-11-18T20:30:49,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:49,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:50,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:50,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df10b4746fa897739653c5eccd447f49 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-18T20:30:51,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/e521dcbea17b49bcb53175f8dfa45756 is 1080, key is row0057/info:/1731961849212/Put/seqid=0 2024-11-18T20:30:51,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741844_1020 (size=14663) 2024-11-18T20:30:51,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741844_1020 (size=14663) 2024-11-18T20:30:51,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/e521dcbea17b49bcb53175f8dfa45756 2024-11-18T20:30:51,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/e521dcbea17b49bcb53175f8dfa45756 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e521dcbea17b49bcb53175f8dfa45756 2024-11-18T20:30:51,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e521dcbea17b49bcb53175f8dfa45756, entries=9, sequenceid=86, filesize=14.3 K 2024-11-18T20:30:51,262 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=11.56 KB/11836 for df10b4746fa897739653c5eccd447f49 in 25ms, sequenceid=86, compaction requested=false 2024-11-18T20:30:51,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:51,262 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-18T20:30:51,262 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:51,262 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267 because midkey is the same as first or last row 2024-11-18T20:30:51,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df10b4746fa897739653c5eccd447f49 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:30:51,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/7d218f6c6b38465a92a8d40c6eab568a is 1080, key is row0066/info:/1731961851240/Put/seqid=0 2024-11-18T20:30:51,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741845_1021 (size=17894) 2024-11-18T20:30:51,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741845_1021 (size=17894) 2024-11-18T20:30:51,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/7d218f6c6b38465a92a8d40c6eab568a 2024-11-18T20:30:51,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/7d218f6c6b38465a92a8d40c6eab568a as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/7d218f6c6b38465a92a8d40c6eab568a 2024-11-18T20:30:51,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/7d218f6c6b38465a92a8d40c6eab568a, entries=12, sequenceid=101, filesize=17.5 K 2024-11-18T20:30:51,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=9.46 KB/9684 for df10b4746fa897739653c5eccd447f49 in 22ms, sequenceid=101, compaction requested=true 2024-11-18T20:30:51,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:51,286 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.9 K, sizeToCheck=16.0 K 2024-11-18T20:30:51,286 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:51,286 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267 because midkey is the same as first or last row 2024-11-18T20:30:51,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store df10b4746fa897739653c5eccd447f49:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:30:51,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:51,286 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:30:51,287 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 98169 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:30:51,287 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): df10b4746fa897739653c5eccd447f49/info is initiating minor compaction (all files) 2024-11-18T20:30:51,287 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of df10b4746fa897739653c5eccd447f49/info in TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:51,287 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e521dcbea17b49bcb53175f8dfa45756, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/7d218f6c6b38465a92a8d40c6eab568a] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp, totalSize=95.9 K 2024-11-18T20:30:51,288 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6b3adf66e88473a9cc92da989f35267, keycount=56, bloomtype=ROW, size=64.1 K, encoding=NONE, compression=NONE, seqNum=72, earliestPutTs=1731961847080 2024-11-18T20:30:51,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,288 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting e521dcbea17b49bcb53175f8dfa45756, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731961849212 2024-11-18T20:30:51,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df10b4746fa897739653c5eccd447f49 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T20:30:51,288 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7d218f6c6b38465a92a8d40c6eab568a, keycount=12, bloomtype=ROW, size=17.5 
K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1731961851240 2024-11-18T20:30:51,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/de377292440942d79701f530ae2dbbed is 1080, key is row0078/info:/1731961851264/Put/seqid=0 2024-11-18T20:30:51,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741846_1022 (size=16817) 2024-11-18T20:30:51,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741846_1022 (size=16817) 2024-11-18T20:30:51,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/de377292440942d79701f530ae2dbbed 2024-11-18T20:30:51,303 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): df10b4746fa897739653c5eccd447f49#info#compaction#64 average throughput is 39.51 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:30:51,303 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/a682cfef5fc446b39a6d956b23c7be0b is 1080, key is row0001/info:/1731961847080/Put/seqid=0 2024-11-18T20:30:51,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/de377292440942d79701f530ae2dbbed as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/de377292440942d79701f530ae2dbbed 2024-11-18T20:30:51,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741847_1023 (size=88408) 2024-11-18T20:30:51,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741847_1023 (size=88408) 2024-11-18T20:30:51,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/de377292440942d79701f530ae2dbbed, entries=11, sequenceid=115, filesize=16.4 K 2024-11-18T20:30:51,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for df10b4746fa897739653c5eccd447f49 in 24ms, sequenceid=115, compaction requested=false 2024-11-18T20:30:51,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:51,313 DEBUG [MemStoreFlusher.0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.3 K, sizeToCheck=16.0 K 2024-11-18T20:30:51,313 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:51,313 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267 because midkey is the same as first or last row 2024-11-18T20:30:51,314 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/a682cfef5fc446b39a6d956b23c7be0b as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/a682cfef5fc446b39a6d956b23c7be0b 2024-11-18T20:30:51,320 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in df10b4746fa897739653c5eccd447f49/info of df10b4746fa897739653c5eccd447f49 into a682cfef5fc446b39a6d956b23c7be0b(size=86.3 K), total size for store is 102.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:30:51,320 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for df10b4746fa897739653c5eccd447f49: 2024-11-18T20:30:51,320 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., storeName=df10b4746fa897739653c5eccd447f49/info, priority=13, startTime=1731961851286; duration=0sec 2024-11-18T20:30:51,320 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-18T20:30:51,320 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:51,320 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-18T20:30:51,320 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:51,320 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-18T20:30:51,320 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T20:30:51,321 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:51,322 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:51,322 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: df10b4746fa897739653c5eccd447f49:info 2024-11-18T20:30:51,323 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45287 {}] assignment.AssignmentManager(1355): Split request from c0a89b2656d4,41687,1731961836135, parent={ENCODED => df10b4746fa897739653c5eccd447f49, NAME => 'TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-18T20:30:51,327 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45287 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:51,330 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45287 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df10b4746fa897739653c5eccd447f49, daughterA=6353cb33c43f31f3407b72767b5eb67b, daughterB=a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:51,331 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df10b4746fa897739653c5eccd447f49, daughterA=6353cb33c43f31f3407b72767b5eb67b, daughterB=a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:51,331 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df10b4746fa897739653c5eccd447f49, daughterA=6353cb33c43f31f3407b72767b5eb67b, daughterB=a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:51,332 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df10b4746fa897739653c5eccd447f49, daughterA=6353cb33c43f31f3407b72767b5eb67b, daughterB=a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:51,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df10b4746fa897739653c5eccd447f49, UNASSIGN}] 2024-11-18T20:30:51,340 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df10b4746fa897739653c5eccd447f49, UNASSIGN 2024-11-18T20:30:51,342 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=df10b4746fa897739653c5eccd447f49, regionState=CLOSING, regionLocation=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:51,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df10b4746fa897739653c5eccd447f49, UNASSIGN because future has completed 2024-11-18T20:30:51,345 DEBUG [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-18T20:30:51,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure df10b4746fa897739653c5eccd447f49, server=c0a89b2656d4,41687,1731961836135}] 2024-11-18T20:30:51,502 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,503 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-18T20:30:51,503 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing df10b4746fa897739653c5eccd447f49, disabling compactions & flushes 2024-11-18T20:30:51,503 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:51,503 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:51,503 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. after waiting 0 ms 2024-11-18T20:30:51,503 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 
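
The split request above (pid=7, daughters 6353cb33c43f31f3407b72767b5eb67b and a9216e802cfb16717ac903a33d9fdd30) first unassigns the parent region, and the close journal recorded here and in the entries that follow keeps a fixed order: block compactions and flushes, take the close lock, disable updates, flush the remaining memstore, archive obsolete store files, write the recovered.edits seqid marker, then mark the region closed. A minimal sketch of that ordering is given below, with hypothetical interface and method names rather than HBase's actual classes.

    final class RegionCloseSketch {

        // Hypothetical view of a region; method names mirror the journal entries, not real HBase APIs.
        interface Region {
            void disableCompactionsAndFlushes(); // "disabling compactions & flushes"
            void acquireCloseLock();             // "Acquired close lock ... after waiting 0 ms"
            void disableUpdates();               // "Updates disabled for region ..."
            long flushRemainingMemstore();       // final flush (~8.41 KB at sequenceid=127 below)
            void archiveCompactedStoreFiles();   // obsolete HFiles moved under archive/
            void writeMaxSequenceIdMarker();     // recovered.edits/130.seqid in this run
            void markClosed();
        }

        // Close a region ahead of a split: writes stop before the last flush so the
        // daughters can later be opened purely from store files and HFileLinks.
        static void closeForSplit(Region region) {
            region.disableCompactionsAndFlushes();
            region.acquireCloseLock();
            region.disableUpdates();
            region.flushRemainingMemstore();
            region.archiveCompactedStoreFiles();
            region.writeMaxSequenceIdMarker();
            region.markClosed();
        }
    }
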
2024-11-18T20:30:51,504 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing df10b4746fa897739653c5eccd447f49 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-18T20:30:51,511 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/b9293318b19d46e0b5de06c73dfa15b9 is 1080, key is row0089/info:/1731961851289/Put/seqid=0 2024-11-18T20:30:51,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741848_1024 (size=13586) 2024-11-18T20:30:51,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741848_1024 (size=13586) 2024-11-18T20:30:51,517 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/b9293318b19d46e0b5de06c73dfa15b9 2024-11-18T20:30:51,523 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/.tmp/info/b9293318b19d46e0b5de06c73dfa15b9 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/b9293318b19d46e0b5de06c73dfa15b9 2024-11-18T20:30:51,529 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/b9293318b19d46e0b5de06c73dfa15b9, entries=8, sequenceid=127, filesize=13.3 K 2024-11-18T20:30:51,530 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for df10b4746fa897739653c5eccd447f49 in 27ms, sequenceid=127, compaction requested=true 2024-11-18T20:30:51,532 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f70bc741b1374f4f99cfba62f6f4a3c1, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/5dc0c428d3ea40c6941bed5f13873e62, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e49aab2508e04b34a9518a4900c2b453, 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/864c5c1058f7419f8e96fde24ec2ea63, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/77ad113f4284423fa9e2d5b16b24427f, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/ae9682ea00094811bf86b8807fa95d61, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e521dcbea17b49bcb53175f8dfa45756, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/7d218f6c6b38465a92a8d40c6eab568a] to archive 2024-11-18T20:30:51,533 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T20:30:51,534 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f70bc741b1374f4f99cfba62f6f4a3c1 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f70bc741b1374f4f99cfba62f6f4a3c1 2024-11-18T20:30:51,536 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/5dc0c428d3ea40c6941bed5f13873e62 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/5dc0c428d3ea40c6941bed5f13873e62 2024-11-18T20:30:51,537 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e49aab2508e04b34a9518a4900c2b453 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e49aab2508e04b34a9518a4900c2b453 2024-11-18T20:30:51,538 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/864c5c1058f7419f8e96fde24ec2ea63 to 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/864c5c1058f7419f8e96fde24ec2ea63 2024-11-18T20:30:51,539 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/77ad113f4284423fa9e2d5b16b24427f to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/77ad113f4284423fa9e2d5b16b24427f 2024-11-18T20:30:51,540 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/f6b3adf66e88473a9cc92da989f35267 2024-11-18T20:30:51,541 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/ae9682ea00094811bf86b8807fa95d61 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/ae9682ea00094811bf86b8807fa95d61 2024-11-18T20:30:51,542 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e521dcbea17b49bcb53175f8dfa45756 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/e521dcbea17b49bcb53175f8dfa45756 2024-11-18T20:30:51,543 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/7d218f6c6b38465a92a8d40c6eab568a to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/7d218f6c6b38465a92a8d40c6eab568a 2024-11-18T20:30:51,549 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
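
The HFileArchiver entries above do not delete the parent's obsolete store files; each one is moved from the region's data directory to the same relative location under the cluster's archive directory. A small sketch of that path mapping follows, assuming a hypothetical helper (the real HFileArchiver does more, such as handling name collisions); it reproduces the source and destination of the first archived file above.

    // Sketch of the data -> archive path mapping visible in the entries above.
    final class ArchivePathSketch {

        /** Assumes the store file path contains a single "/data/" segment under the HBase root. */
        static String toArchivePath(String storeFilePath) {
            int idx = storeFilePath.indexOf("/data/");
            if (idx < 0) {
                throw new IllegalArgumentException("not a store file path under /data/: " + storeFilePath);
            }
            // Keep the table/region/family/file layout, but root it under <hbase root>/archive/.
            return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
        }

        public static void main(String[] args) {
            String src = "hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b"
                + "/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49"
                + "/info/f70bc741b1374f4f99cfba62f6f4a3c1";
            // Prints the same destination the HFileArchiver logged above, with
            // ".../archive/data/default/..." substituted for ".../data/default/...".
            System.out.println(toArchivePath(src));
        }
    }
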
2024-11-18T20:30:51,550 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 2024-11-18T20:30:51,550 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for df10b4746fa897739653c5eccd447f49: Waiting for close lock at 1731961851503Running coprocessor pre-close hooks at 1731961851503Disabling compacts and flushes for region at 1731961851503Disabling writes for close at 1731961851503Obtaining lock to block concurrent updates at 1731961851504 (+1 ms)Preparing flush snapshotting stores in df10b4746fa897739653c5eccd447f49 at 1731961851504Finished memstore snapshotting TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., syncing WAL and waiting on mvcc, flushsize=dataSize=8608, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1731961851504Flushing stores of TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. at 1731961851505 (+1 ms)Flushing df10b4746fa897739653c5eccd447f49/info: creating writer at 1731961851505Flushing df10b4746fa897739653c5eccd447f49/info: appending metadata at 1731961851510 (+5 ms)Flushing df10b4746fa897739653c5eccd447f49/info: closing flushed file at 1731961851510Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a957ccd: reopening flushed file at 1731961851522 (+12 ms)Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for df10b4746fa897739653c5eccd447f49 in 27ms, sequenceid=127, compaction requested=true at 1731961851531 (+9 ms)Writing region close event to WAL at 1731961851545 (+14 ms)Running coprocessor post-close hooks at 1731961851549 (+4 ms)Closed at 1731961851550 (+1 ms) 2024-11-18T20:30:51,552 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,552 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=df10b4746fa897739653c5eccd447f49, regionState=CLOSED 2024-11-18T20:30:51,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure df10b4746fa897739653c5eccd447f49, server=c0a89b2656d4,41687,1731961836135 because future has completed 2024-11-18T20:30:51,557 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-18T20:30:51,557 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure df10b4746fa897739653c5eccd447f49, server=c0a89b2656d4,41687,1731961836135 in 210 msec 2024-11-18T20:30:51,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-18T20:30:51,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df10b4746fa897739653c5eccd447f49, UNASSIGN in 219 msec 2024-11-18T20:30:51,566 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:51,569 INFO 
[PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=df10b4746fa897739653c5eccd447f49, threads=3 2024-11-18T20:30:51,570 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/de377292440942d79701f530ae2dbbed for region: df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,570 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/b9293318b19d46e0b5de06c73dfa15b9 for region: df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,570 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/a682cfef5fc446b39a6d956b23c7be0b for region: df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,580 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/de377292440942d79701f530ae2dbbed, top=true 2024-11-18T20:30:51,580 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/b9293318b19d46e0b5de06c73dfa15b9, top=true 2024-11-18T20:30:51,593 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-de377292440942d79701f530ae2dbbed for child: a9216e802cfb16717ac903a33d9fdd30, parent: df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,593 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-b9293318b19d46e0b5de06c73dfa15b9 for child: a9216e802cfb16717ac903a33d9fdd30, parent: df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741849_1025 (size=27) 2024-11-18T20:30:51,593 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/b9293318b19d46e0b5de06c73dfa15b9 for region: df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,593 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 
splitting complete for store file: hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/de377292440942d79701f530ae2dbbed for region: df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741849_1025 (size=27) 2024-11-18T20:30:51,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741850_1026 (size=27) 2024-11-18T20:30:51,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741850_1026 (size=27) 2024-11-18T20:30:51,607 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/a682cfef5fc446b39a6d956b23c7be0b for region: df10b4746fa897739653c5eccd447f49 2024-11-18T20:30:51,610 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region df10b4746fa897739653c5eccd447f49 Daughter A: [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49] storefiles, Daughter B: [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-b9293318b19d46e0b5de06c73dfa15b9, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-de377292440942d79701f530ae2dbbed] storefiles. 2024-11-18T20:30:51,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741851_1027 (size=71) 2024-11-18T20:30:51,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741851_1027 (size=71) 2024-11-18T20:30:51,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:51,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:52,026 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:52,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741852_1028 (size=71) 2024-11-18T20:30:52,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741852_1028 (size=71) 2024-11-18T20:30:52,045 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:52,057 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-18T20:30:52,059 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-18T20:30:52,062 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731961852061"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731961852061"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731961852061"}]},"ts":"1731961852061"} 2024-11-18T20:30:52,062 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731961852061"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961852061"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731961852061"}]},"ts":"1731961852061"} 2024-11-18T20:30:52,062 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731961852061"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731961852061"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731961852061"}]},"ts":"1731961852061"} 2024-11-18T20:30:52,077 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6353cb33c43f31f3407b72767b5eb67b, ASSIGN}, {pid=11, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a9216e802cfb16717ac903a33d9fdd30, ASSIGN}] 2024-11-18T20:30:52,078 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6353cb33c43f31f3407b72767b5eb67b, ASSIGN 2024-11-18T20:30:52,078 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a9216e802cfb16717ac903a33d9fdd30, ASSIGN 2024-11-18T20:30:52,079 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6353cb33c43f31f3407b72767b5eb67b, ASSIGN; state=SPLITTING_NEW, location=c0a89b2656d4,41687,1731961836135; forceNewPlan=false, retain=false 2024-11-18T20:30:52,079 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a9216e802cfb16717ac903a33d9fdd30, ASSIGN; state=SPLITTING_NEW, location=c0a89b2656d4,41687,1731961836135; forceNewPlan=false, retain=false 2024-11-18T20:30:52,230 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=a9216e802cfb16717ac903a33d9fdd30, regionState=OPENING, regionLocation=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:52,230 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6353cb33c43f31f3407b72767b5eb67b, regionState=OPENING, regionLocation=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:52,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6353cb33c43f31f3407b72767b5eb67b, ASSIGN because future has completed 2024-11-18T20:30:52,233 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6353cb33c43f31f3407b72767b5eb67b, server=c0a89b2656d4,41687,1731961836135}] 2024-11-18T20:30:52,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a9216e802cfb16717ac903a33d9fdd30, ASSIGN because future has completed 2024-11-18T20:30:52,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135}] 2024-11-18T20:30:52,390 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 
2024-11-18T20:30:52,391 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 6353cb33c43f31f3407b72767b5eb67b, NAME => 'TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-18T20:30:52,391 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,391 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:30:52,392 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,392 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,394 INFO [StoreOpener-6353cb33c43f31f3407b72767b5eb67b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,396 INFO [StoreOpener-6353cb33c43f31f3407b72767b5eb67b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6353cb33c43f31f3407b72767b5eb67b columnFamilyName info 2024-11-18T20:30:52,396 DEBUG [StoreOpener-6353cb33c43f31f3407b72767b5eb67b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:52,410 DEBUG [StoreOpener-6353cb33c43f31f3407b72767b5eb67b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49->hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/a682cfef5fc446b39a6d956b23c7be0b-bottom 2024-11-18T20:30:52,410 INFO [StoreOpener-6353cb33c43f31f3407b72767b5eb67b-1 {}] regionserver.HStore(327): Store=6353cb33c43f31f3407b72767b5eb67b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:30:52,411 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,412 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,413 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,413 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,413 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,415 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,416 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 6353cb33c43f31f3407b72767b5eb67b; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785812, jitterRate=-7.891952991485596E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:30:52,416 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:30:52,416 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 6353cb33c43f31f3407b72767b5eb67b: Running coprocessor pre-open hook at 1731961852392Writing region info on filesystem at 1731961852392Initializing all the Stores at 1731961852394 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961852394Cleaning up temporary data from old regions at 1731961852413 (+19 ms)Running coprocessor post-open hooks at 1731961852416 (+3 ms)Region opened successfully at 1731961852416 2024-11-18T20:30:52,417 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b., pid=12, masterSystemTime=1731961852386 2024-11-18T20:30:52,417 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
6353cb33c43f31f3407b72767b5eb67b:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:30:52,418 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:52,418 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-18T20:30:52,418 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 2024-11-18T20:30:52,418 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): 6353cb33c43f31f3407b72767b5eb67b/info is initiating minor compaction (all files) 2024-11-18T20:30:52,418 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6353cb33c43f31f3407b72767b5eb67b/info in TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 2024-11-18T20:30:52,418 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49->hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/a682cfef5fc446b39a6d956b23c7be0b-bottom] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/.tmp, totalSize=86.3 K 2024-11-18T20:30:52,419 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1731961847080 2024-11-18T20:30:52,420 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 2024-11-18T20:30:52,420 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 2024-11-18T20:30:52,420 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 
2024-11-18T20:30:52,420 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => a9216e802cfb16717ac903a33d9fdd30, NAME => 'TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-18T20:30:52,420 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,420 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:30:52,420 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,420 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,420 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6353cb33c43f31f3407b72767b5eb67b, regionState=OPEN, openSeqNum=131, regionLocation=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:52,421 INFO [StoreOpener-a9216e802cfb16717ac903a33d9fdd30-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,422 INFO [StoreOpener-a9216e802cfb16717ac903a33d9fdd30-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a9216e802cfb16717ac903a33d9fdd30 columnFamilyName info 2024-11-18T20:30:52,422 DEBUG [StoreOpener-a9216e802cfb16717ac903a33d9fdd30-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:30:52,422 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-18T20:30:52,423 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-18T20:30:52,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-18T20:30:52,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6353cb33c43f31f3407b72767b5eb67b, server=c0a89b2656d4,41687,1731961836135 because future has completed 2024-11-18T20:30:52,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-18T20:30:52,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 6353cb33c43f31f3407b72767b5eb67b, server=c0a89b2656d4,41687,1731961836135 in 191 msec 2024-11-18T20:30:52,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6353cb33c43f31f3407b72767b5eb67b, ASSIGN in 350 msec 2024-11-18T20:30:52,432 DEBUG [StoreOpener-a9216e802cfb16717ac903a33d9fdd30-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-b9293318b19d46e0b5de06c73dfa15b9 2024-11-18T20:30:52,437 DEBUG [StoreOpener-a9216e802cfb16717ac903a33d9fdd30-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-de377292440942d79701f530ae2dbbed 2024-11-18T20:30:52,440 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6353cb33c43f31f3407b72767b5eb67b#info#compaction#66 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:30:52,441 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/.tmp/info/bf7e9f15dea7427796c004c33d7ad03d is 1080, key is row0001/info:/1731961847080/Put/seqid=0 2024-11-18T20:30:52,444 DEBUG [StoreOpener-a9216e802cfb16717ac903a33d9fdd30-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49->hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/a682cfef5fc446b39a6d956b23c7be0b-top 2024-11-18T20:30:52,444 INFO [StoreOpener-a9216e802cfb16717ac903a33d9fdd30-1 {}] regionserver.HStore(327): Store=a9216e802cfb16717ac903a33d9fdd30/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:30:52,444 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,445 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,446 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,447 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,447 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,449 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/info/d5346a5a4cb642028e27b81382fa8e11 is 193, key is TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30./info:regioninfo/1731961852230/Put/seqid=0 2024-11-18T20:30:52,450 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened a9216e802cfb16717ac903a33d9fdd30; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818731, 
jitterRate=0.041071221232414246}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T20:30:52,450 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:30:52,450 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for a9216e802cfb16717ac903a33d9fdd30: Running coprocessor pre-open hook at 1731961852420Writing region info on filesystem at 1731961852420Initializing all the Stores at 1731961852421 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961852421Cleaning up temporary data from old regions at 1731961852447 (+26 ms)Running coprocessor post-open hooks at 1731961852450 (+3 ms)Region opened successfully at 1731961852450 2024-11-18T20:30:52,451 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., pid=13, masterSystemTime=1731961852386 2024-11-18T20:30:52,451 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store a9216e802cfb16717ac903a33d9fdd30:info, priority=-2147483648, current under compaction store size is 2 2024-11-18T20:30:52,451 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:52,451 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:30:52,453 INFO [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:30:52,453 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.HStore(1541): a9216e802cfb16717ac903a33d9fdd30/info is initiating minor compaction (all files) 2024-11-18T20:30:52,453 INFO [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a9216e802cfb16717ac903a33d9fdd30/info in TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 
2024-11-18T20:30:52,453 INFO [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49->hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/a682cfef5fc446b39a6d956b23c7be0b-top, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-de377292440942d79701f530ae2dbbed, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-b9293318b19d46e0b5de06c73dfa15b9] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp, totalSize=116.0 K 2024-11-18T20:30:52,453 DEBUG [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:30:52,453 INFO [RS_OPEN_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 
2024-11-18T20:30:52,454 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] compactions.Compactor(225): Compacting a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731961847080 2024-11-18T20:30:52,454 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=a9216e802cfb16717ac903a33d9fdd30, regionState=OPEN, openSeqNum=131, regionLocation=c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:52,454 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-de377292440942d79701f530ae2dbbed, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731961851264 2024-11-18T20:30:52,455 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-b9293318b19d46e0b5de06c73dfa15b9, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731961851289 2024-11-18T20:30:52,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741853_1029 (size=70862) 2024-11-18T20:30:52,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741853_1029 (size=70862) 2024-11-18T20:30:52,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 because future has completed 2024-11-18T20:30:52,461 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/.tmp/info/bf7e9f15dea7427796c004c33d7ad03d as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/info/bf7e9f15dea7427796c004c33d7ad03d 2024-11-18T20:30:52,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-18T20:30:52,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 in 231 msec 2024-11-18T20:30:52,469 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 6353cb33c43f31f3407b72767b5eb67b/info of 6353cb33c43f31f3407b72767b5eb67b into bf7e9f15dea7427796c004c33d7ad03d(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T20:30:52,469 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6353cb33c43f31f3407b72767b5eb67b: 2024-11-18T20:30:52,469 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b., storeName=6353cb33c43f31f3407b72767b5eb67b/info, priority=15, startTime=1731961852417; duration=0sec 2024-11-18T20:30:52,469 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:52,469 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6353cb33c43f31f3407b72767b5eb67b:info 2024-11-18T20:30:52,471 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-18T20:30:52,471 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a9216e802cfb16717ac903a33d9fdd30, ASSIGN in 392 msec 2024-11-18T20:30:52,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741854_1030 (size=9847) 2024-11-18T20:30:52,474 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df10b4746fa897739653c5eccd447f49, daughterA=6353cb33c43f31f3407b72767b5eb67b, daughterB=a9216e802cfb16717ac903a33d9fdd30 in 1.1440 sec 2024-11-18T20:30:52,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741854_1030 (size=9847) 2024-11-18T20:30:52,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/info/d5346a5a4cb642028e27b81382fa8e11 2024-11-18T20:30:52,485 INFO [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a9216e802cfb16717ac903a33d9fdd30#info#compaction#68 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:30:52,485 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/1e8a9820b88f40f7aa9feb70160b0eb3 is 1080, key is row0062/info:/1731961849225/Put/seqid=0 2024-11-18T20:30:52,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741855_1031 (size=42984) 2024-11-18T20:30:52,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741855_1031 (size=42984) 2024-11-18T20:30:52,495 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/1e8a9820b88f40f7aa9feb70160b0eb3 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1e8a9820b88f40f7aa9feb70160b0eb3 2024-11-18T20:30:52,500 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/ns/e46879780a404d3f94debdb680ca5228 is 43, key is default/ns:d/1731961836933/Put/seqid=0 2024-11-18T20:30:52,500 INFO [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a9216e802cfb16717ac903a33d9fdd30/info of a9216e802cfb16717ac903a33d9fdd30 into 1e8a9820b88f40f7aa9feb70160b0eb3(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T20:30:52,500 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:30:52,500 INFO [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., storeName=a9216e802cfb16717ac903a33d9fdd30/info, priority=13, startTime=1731961852451; duration=0sec 2024-11-18T20:30:52,501 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:30:52,501 DEBUG [RS:0;c0a89b2656d4:41687-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a9216e802cfb16717ac903a33d9fdd30:info 2024-11-18T20:30:52,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741856_1032 (size=5153) 2024-11-18T20:30:52,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741856_1032 (size=5153) 2024-11-18T20:30:52,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/ns/e46879780a404d3f94debdb680ca5228 2024-11-18T20:30:52,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/table/9cf9b8feaecc45f79741462728141a80 is 65, key is TestLogRolling-testLogRolling/table:state/1731961837318/Put/seqid=0 2024-11-18T20:30:52,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741857_1033 (size=5340) 2024-11-18T20:30:52,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741857_1033 (size=5340) 2024-11-18T20:30:52,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/table/9cf9b8feaecc45f79741462728141a80 2024-11-18T20:30:52,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/info/d5346a5a4cb642028e27b81382fa8e11 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/info/d5346a5a4cb642028e27b81382fa8e11 2024-11-18T20:30:52,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/info/d5346a5a4cb642028e27b81382fa8e11, entries=30, sequenceid=17, filesize=9.6 K 2024-11-18T20:30:52,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/ns/e46879780a404d3f94debdb680ca5228 as 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/ns/e46879780a404d3f94debdb680ca5228 2024-11-18T20:30:52,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/ns/e46879780a404d3f94debdb680ca5228, entries=2, sequenceid=17, filesize=5.0 K 2024-11-18T20:30:52,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/table/9cf9b8feaecc45f79741462728141a80 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/table/9cf9b8feaecc45f79741462728141a80 2024-11-18T20:30:52,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/table/9cf9b8feaecc45f79741462728141a80, entries=2, sequenceid=17, filesize=5.2 K 2024-11-18T20:30:52,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 127ms, sequenceid=17, compaction requested=false 2024-11-18T20:30:52,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T20:30:52,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:52,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:53,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:55320 deadline: 1731961863309, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. 
is not online on c0a89b2656d4,41687,1731961836135 2024-11-18T20:30:53,336 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., hostname=c0a89b2656d4,41687,1731961836135, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., hostname=c0a89b2656d4,41687,1731961836135, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. is not online on c0a89b2656d4,41687,1731961836135 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T20:30:53,337 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., hostname=c0a89b2656d4,41687,1731961836135, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49. is not online on c0a89b2656d4,41687,1731961836135 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T20:30:53,337 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731961836960.df10b4746fa897739653c5eccd447f49., hostname=c0a89b2656d4,41687,1731961836135, seqNum=2 from cache 2024-11-18T20:30:53,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:53,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:54,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:54,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:55,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:55,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:56,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,552 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,552 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,575 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:56,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:56,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:57,088 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T20:30:57,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T20:30:57,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:57,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:58,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:58,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:30:59,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:30:59,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:00,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:31:00,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:01,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:01,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:31:02,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:02,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:03,429 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., hostname=c0a89b2656d4,41687,1731961836135, seqNum=131] 2024-11-18T20:31:03,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:03,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:31:03,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/83a88ac56fdc48a2adbd73e00d68647c is 1080, key is row0097/info:/1731961863430/Put/seqid=0 2024-11-18T20:31:03,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741858_1034 (size=12516) 2024-11-18T20:31:03,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741858_1034 (size=12516) 2024-11-18T20:31:03,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/83a88ac56fdc48a2adbd73e00d68647c 2024-11-18T20:31:03,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/83a88ac56fdc48a2adbd73e00d68647c as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/83a88ac56fdc48a2adbd73e00d68647c 2024-11-18T20:31:03,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/83a88ac56fdc48a2adbd73e00d68647c, entries=7, sequenceid=141, filesize=12.2 K 2024-11-18T20:31:03,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for a9216e802cfb16717ac903a33d9fdd30 in 24ms, sequenceid=141, compaction requested=false 2024-11-18T20:31:03,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:03,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:03,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:31:03,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/1ce24515e583473481c5e41642b51f69 is 1080, key is row0104/info:/1731961863442/Put/seqid=0 2024-11-18T20:31:03,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741859_1035 (size=17906) 2024-11-18T20:31:03,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741859_1035 (size=17906) 2024-11-18T20:31:03,482 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/1ce24515e583473481c5e41642b51f69 2024-11-18T20:31:03,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/1ce24515e583473481c5e41642b51f69 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1ce24515e583473481c5e41642b51f69 2024-11-18T20:31:03,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1ce24515e583473481c5e41642b51f69, entries=12, sequenceid=156, filesize=17.5 K 2024-11-18T20:31:03,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for a9216e802cfb16717ac903a33d9fdd30 in 30ms, sequenceid=156, compaction requested=true 2024-11-18T20:31:03,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:03,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a9216e802cfb16717ac903a33d9fdd30:info, priority=-2147483648, current under compaction store size is 1 
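For context, the flush-then-compaction sequence recorded above for a9216e802cfb16717ac903a33d9fdd30/info (a memstore flush producing a new store file, followed by a minor compaction once several files are eligible) can be driven against the same test table through the public HBase client API. The sketch below is illustrative only and is not the test's own code: the table name, column family, and row keys are taken from the log, while the connection configuration, row count, and cell values are assumptions.

// Illustrative sketch only (assumed setup, not the test's actual code): write a few rows,
// flush the memstore to an HFile, and request a compaction, mirroring the sequence in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
    TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");   // table name from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Write a handful of rows into the 'info' family, similar to row0097..row0103 above.
      for (int i = 97; i <= 103; i++) {
        Put p = new Put(Bytes.toBytes(String.format("row%04d", i)));
        p.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value-" + i));
        table.put(p);
      }
      // Force the memstore out to a store file; repeated put/flush cycles accumulate
      // store files until the compaction policy selects them, as seen in the log.
      admin.flush(tn);
      admin.compact(tn);   // explicitly request a (minor) compaction of the table
    }
  }
}

Each flush adds one store file, so after enough cycles the number of eligible files reaches the compaction threshold (commonly 3 by default), which matches the point at which the log shows ExploringCompactionPolicy selecting three files for a minor compaction.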
2024-11-18T20:31:03,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:03,497 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:31:03,498 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73406 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:31:03,498 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): a9216e802cfb16717ac903a33d9fdd30/info is initiating minor compaction (all files) 2024-11-18T20:31:03,498 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a9216e802cfb16717ac903a33d9fdd30/info in TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:31:03,498 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1e8a9820b88f40f7aa9feb70160b0eb3, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/83a88ac56fdc48a2adbd73e00d68647c, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1ce24515e583473481c5e41642b51f69] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp, totalSize=71.7 K 2024-11-18T20:31:03,499 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1e8a9820b88f40f7aa9feb70160b0eb3, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731961849225 2024-11-18T20:31:03,499 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 83a88ac56fdc48a2adbd73e00d68647c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1731961863430 2024-11-18T20:31:03,499 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1ce24515e583473481c5e41642b51f69, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731961863442 2024-11-18T20:31:03,512 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a9216e802cfb16717ac903a33d9fdd30#info#compaction#73 average throughput is 27.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:31:03,513 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/6ccc7b88d05b4a5085ea2fcc2339f8bd is 1080, key is row0062/info:/1731961849225/Put/seqid=0 2024-11-18T20:31:03,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741860_1036 (size=63636) 2024-11-18T20:31:03,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741860_1036 (size=63636) 2024-11-18T20:31:03,524 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/6ccc7b88d05b4a5085ea2fcc2339f8bd as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/6ccc7b88d05b4a5085ea2fcc2339f8bd 2024-11-18T20:31:03,531 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a9216e802cfb16717ac903a33d9fdd30/info of a9216e802cfb16717ac903a33d9fdd30 into 6ccc7b88d05b4a5085ea2fcc2339f8bd(size=62.1 K), total size for store is 62.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:31:03,531 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:03,531 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., storeName=a9216e802cfb16717ac903a33d9fdd30/info, priority=13, startTime=1731961863497; duration=0sec 2024-11-18T20:31:03,531 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:03,531 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a9216e802cfb16717ac903a33d9fdd30:info 2024-11-18T20:31:03,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:03,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:04,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:04,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:05,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:05,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-18T20:31:05,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/e2266783f2f6480ca9719eed3913b848 is 1080, key is row0116/info:/1731961863468/Put/seqid=0 2024-11-18T20:31:05,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741861_1037 (size=20078) 2024-11-18T20:31:05,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741861_1037 (size=20078) 2024-11-18T20:31:05,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/e2266783f2f6480ca9719eed3913b848 2024-11-18T20:31:05,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/e2266783f2f6480ca9719eed3913b848 as 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/e2266783f2f6480ca9719eed3913b848 2024-11-18T20:31:05,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/e2266783f2f6480ca9719eed3913b848, entries=14, sequenceid=174, filesize=19.6 K 2024-11-18T20:31:05,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for a9216e802cfb16717ac903a33d9fdd30 in 23ms, sequenceid=174, compaction requested=false 2024-11-18T20:31:05,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:05,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:05,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:31:05,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/bb861a34623c45a485fc651216a63ac0 is 1080, key is row0130/info:/1731961865499/Put/seqid=0 2024-11-18T20:31:05,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741862_1038 (size=17906) 2024-11-18T20:31:05,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741862_1038 (size=17906) 2024-11-18T20:31:05,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/bb861a34623c45a485fc651216a63ac0 2024-11-18T20:31:05,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/bb861a34623c45a485fc651216a63ac0 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/bb861a34623c45a485fc651216a63ac0 2024-11-18T20:31:05,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/bb861a34623c45a485fc651216a63ac0, entries=12, sequenceid=189, filesize=17.5 K 2024-11-18T20:31:05,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for a9216e802cfb16717ac903a33d9fdd30 in 31ms, sequenceid=189, compaction requested=true 2024-11-18T20:31:05,553 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:05,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a9216e802cfb16717ac903a33d9fdd30:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:31:05,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:05,553 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:31:05,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:05,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:31:05,554 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101620 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:31:05,554 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): a9216e802cfb16717ac903a33d9fdd30/info is initiating minor compaction (all files) 2024-11-18T20:31:05,554 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a9216e802cfb16717ac903a33d9fdd30/info in TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:31:05,555 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/6ccc7b88d05b4a5085ea2fcc2339f8bd, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/e2266783f2f6480ca9719eed3913b848, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/bb861a34623c45a485fc651216a63ac0] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp, totalSize=99.2 K 2024-11-18T20:31:05,555 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6ccc7b88d05b4a5085ea2fcc2339f8bd, keycount=54, bloomtype=ROW, size=62.1 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731961849225 2024-11-18T20:31:05,555 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting e2266783f2f6480ca9719eed3913b848, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731961863468 2024-11-18T20:31:05,556 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting bb861a34623c45a485fc651216a63ac0, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731961865499 2024-11-18T20:31:05,559 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/ad91587a97564bd79762d44dd15e448b is 1080, key is row0142/info:/1731961865523/Put/seqid=0 2024-11-18T20:31:05,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741863_1039 (size=17906) 2024-11-18T20:31:05,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741863_1039 (size=17906) 2024-11-18T20:31:05,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/ad91587a97564bd79762d44dd15e448b 2024-11-18T20:31:05,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/ad91587a97564bd79762d44dd15e448b as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/ad91587a97564bd79762d44dd15e448b 2024-11-18T20:31:05,586 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a9216e802cfb16717ac903a33d9fdd30#info#compaction#77 average throughput is 41.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:31:05,586 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/aec18a528361410fa63fd8e17c32d06f is 1080, key is row0062/info:/1731961849225/Put/seqid=0 2024-11-18T20:31:05,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/ad91587a97564bd79762d44dd15e448b, entries=12, sequenceid=204, filesize=17.5 K 2024-11-18T20:31:05,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for a9216e802cfb16717ac903a33d9fdd30 in 35ms, sequenceid=204, compaction requested=false 2024-11-18T20:31:05,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:05,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741864_1040 (size=91843) 2024-11-18T20:31:05,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741864_1040 (size=91843) 2024-11-18T20:31:05,604 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/aec18a528361410fa63fd8e17c32d06f as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/aec18a528361410fa63fd8e17c32d06f 2024-11-18T20:31:05,612 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a9216e802cfb16717ac903a33d9fdd30/info of a9216e802cfb16717ac903a33d9fdd30 into aec18a528361410fa63fd8e17c32d06f(size=89.7 K), total size for store is 107.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
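[editor's note] The throttle.PressureAwareThroughputController entries above report each compaction's average write throughput against a 50.00 MB/second limit and how long the writer slept to stay under it. The sketch below shows only the basic "sleep when ahead of the byte budget" idea with a fixed limit; the real controller adjusts its limit with store pressure, and all names here are hypothetical.

```java
public class FixedRateThrottle {
    private final double limitBytesPerSec;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;

    FixedRateThrottle(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    /** Record bytes written and sleep just long enough to stay under the limit. */
    void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double earliestAllowedSec = bytesWritten / limitBytesPerSec;
        long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/second, matching the "total limit is 50.00 MB/second" lines in the log.
        FixedRateThrottle throttle = new FixedRateThrottle(50 * 1024 * 1024);
        for (int i = 0; i < 5; i++) {
            throttle.control(16 * 1024 * 1024); // pretend each chunk of compaction output is 16 MB
        }
        System.out.println("wrote 80 MB without exceeding the 50 MB/s cap");
    }
}
```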
2024-11-18T20:31:05,612 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:05,612 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., storeName=a9216e802cfb16717ac903a33d9fdd30/info, priority=13, startTime=1731961865553; duration=0sec 2024-11-18T20:31:05,612 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:05,612 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a9216e802cfb16717ac903a33d9fdd30:info 2024-11-18T20:31:05,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:31:05,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:06,071 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T20:31:06,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:06,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:07,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:07,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-18T20:31:07,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/0e21e528c67b4683ad146b1dcc3335b1 is 1080, key is row0154/info:/1731961865556/Put/seqid=0 2024-11-18T20:31:07,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741865_1041 (size=13594) 2024-11-18T20:31:07,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741865_1041 (size=13594) 2024-11-18T20:31:07,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/0e21e528c67b4683ad146b1dcc3335b1 2024-11-18T20:31:07,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/0e21e528c67b4683ad146b1dcc3335b1 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/0e21e528c67b4683ad146b1dcc3335b1 2024-11-18T20:31:07,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/0e21e528c67b4683ad146b1dcc3335b1, entries=8, sequenceid=216, filesize=13.3 K 2024-11-18T20:31:07,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for a9216e802cfb16717ac903a33d9fdd30 in 25ms, sequenceid=216, compaction requested=true 2024-11-18T20:31:07,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:07,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a9216e802cfb16717ac903a33d9fdd30:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:31:07,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:07,602 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:31:07,604 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:31:07,604 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): a9216e802cfb16717ac903a33d9fdd30/info is initiating minor compaction (all files) 2024-11-18T20:31:07,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:07,604 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a9216e802cfb16717ac903a33d9fdd30/info in TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:31:07,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T20:31:07,604 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/aec18a528361410fa63fd8e17c32d06f, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/ad91587a97564bd79762d44dd15e448b, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/0e21e528c67b4683ad146b1dcc3335b1] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp, totalSize=120.5 K 2024-11-18T20:31:07,605 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting aec18a528361410fa63fd8e17c32d06f, keycount=80, bloomtype=ROW, size=89.7 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731961849225 2024-11-18T20:31:07,605 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting ad91587a97564bd79762d44dd15e448b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731961865523 2024-11-18T20:31:07,605 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0e21e528c67b4683ad146b1dcc3335b1, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731961865556 2024-11-18T20:31:07,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/d98ca75a86c04f51aa999bff644ba8b2 is 1080, key is row0162/info:/1731961867578/Put/seqid=0 2024-11-18T20:31:07,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to 
blk_1073741866_1042 (size=17906) 2024-11-18T20:31:07,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741866_1042 (size=17906) 2024-11-18T20:31:07,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/d98ca75a86c04f51aa999bff644ba8b2 2024-11-18T20:31:07,619 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a9216e802cfb16717ac903a33d9fdd30#info#compaction#80 average throughput is 34.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:31:07,620 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/2fac507398244f0f9d5ed4142add73f5 is 1080, key is row0062/info:/1731961849225/Put/seqid=0 2024-11-18T20:31:07,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/d98ca75a86c04f51aa999bff644ba8b2 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/d98ca75a86c04f51aa999bff644ba8b2 2024-11-18T20:31:07,625 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/d98ca75a86c04f51aa999bff644ba8b2, entries=12, sequenceid=231, filesize=17.5 K 2024-11-18T20:31:07,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for a9216e802cfb16717ac903a33d9fdd30 in 22ms, sequenceid=231, compaction requested=false 2024-11-18T20:31:07,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:07,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:07,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-18T20:31:07,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/baad7147ac074c2cbf35b115730eb32f is 1080, key is row0174/info:/1731961867605/Put/seqid=0 2024-11-18T20:31:07,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741867_1043 (size=113509) 2024-11-18T20:31:07,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33451 is added to blk_1073741867_1043 (size=113509) 2024-11-18T20:31:07,644 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/2fac507398244f0f9d5ed4142add73f5 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/2fac507398244f0f9d5ed4142add73f5 2024-11-18T20:31:07,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741868_1044 (size=15750) 2024-11-18T20:31:07,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/baad7147ac074c2cbf35b115730eb32f 2024-11-18T20:31:07,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741868_1044 (size=15750) 2024-11-18T20:31:07,650 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a9216e802cfb16717ac903a33d9fdd30/info of a9216e802cfb16717ac903a33d9fdd30 into 2fac507398244f0f9d5ed4142add73f5(size=110.8 K), total size for store is 128.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:31:07,650 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:07,650 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., storeName=a9216e802cfb16717ac903a33d9fdd30/info, priority=13, startTime=1731961867602; duration=0sec 2024-11-18T20:31:07,650 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:07,650 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a9216e802cfb16717ac903a33d9fdd30:info 2024-11-18T20:31:07,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/baad7147ac074c2cbf35b115730eb32f as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/baad7147ac074c2cbf35b115730eb32f 2024-11-18T20:31:07,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/baad7147ac074c2cbf35b115730eb32f, entries=10, sequenceid=244, filesize=15.4 K 2024-11-18T20:31:07,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): 
Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=9.46 KB/9684 for a9216e802cfb16717ac903a33d9fdd30 in 30ms, sequenceid=244, compaction requested=true 2024-11-18T20:31:07,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:07,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a9216e802cfb16717ac903a33d9fdd30:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:31:07,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:07,658 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:31:07,660 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 147165 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T20:31:07,660 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): a9216e802cfb16717ac903a33d9fdd30/info is initiating minor compaction (all files) 2024-11-18T20:31:07,660 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a9216e802cfb16717ac903a33d9fdd30/info in TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:31:07,660 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/2fac507398244f0f9d5ed4142add73f5, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/d98ca75a86c04f51aa999bff644ba8b2, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/baad7147ac074c2cbf35b115730eb32f] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp, totalSize=143.7 K 2024-11-18T20:31:07,660 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2fac507398244f0f9d5ed4142add73f5, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1731961849225 2024-11-18T20:31:07,661 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting d98ca75a86c04f51aa999bff644ba8b2, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1731961867578 2024-11-18T20:31:07,661 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting baad7147ac074c2cbf35b115730eb32f, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731961867605 2024-11-18T20:31:07,672 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a9216e802cfb16717ac903a33d9fdd30#info#compaction#82 
average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:31:07,672 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/a53df3236cc547bb8907bebd692c126f is 1080, key is row0062/info:/1731961849225/Put/seqid=0 2024-11-18T20:31:07,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741869_1045 (size=137463) 2024-11-18T20:31:07,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741869_1045 (size=137463) 2024-11-18T20:31:07,681 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/a53df3236cc547bb8907bebd692c126f as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a53df3236cc547bb8907bebd692c126f 2024-11-18T20:31:07,687 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a9216e802cfb16717ac903a33d9fdd30/info of a9216e802cfb16717ac903a33d9fdd30 into a53df3236cc547bb8907bebd692c126f(size=134.2 K), total size for store is 134.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:31:07,687 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:07,687 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., storeName=a9216e802cfb16717ac903a33d9fdd30/info, priority=13, startTime=1731961867658; duration=0sec 2024-11-18T20:31:07,687 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:07,687 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a9216e802cfb16717ac903a33d9fdd30:info 2024-11-18T20:31:07,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:07,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:08,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:08,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:09,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-18T20:31:09,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/7039b3c93fba42d1ab34a9d3c5275a37 is 1080, key is row0184/info:/1731961867629/Put/seqid=0 2024-11-18T20:31:09,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741870_1046 (size=15751) 2024-11-18T20:31:09,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741870_1046 (size=15751) 2024-11-18T20:31:09,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/7039b3c93fba42d1ab34a9d3c5275a37 2024-11-18T20:31:09,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/7039b3c93fba42d1ab34a9d3c5275a37 as 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/7039b3c93fba42d1ab34a9d3c5275a37 2024-11-18T20:31:09,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/7039b3c93fba42d1ab34a9d3c5275a37, entries=10, sequenceid=259, filesize=15.4 K 2024-11-18T20:31:09,685 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=13.66 KB/13988 for a9216e802cfb16717ac903a33d9fdd30 in 35ms, sequenceid=259, compaction requested=false 2024-11-18T20:31:09,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:09,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:09,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-18T20:31:09,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/88a840aaa8cc491d990d2b6791bdcaa2 is 1080, key is row0194/info:/1731961869651/Put/seqid=0 2024-11-18T20:31:09,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41687 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-18T20:31:09,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41687 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:55320 deadline: 1731961879727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 2024-11-18T20:31:09,729 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., hostname=c0a89b2656d4,41687,1731961836135, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., hostname=c0a89b2656d4,41687,1731961836135, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T20:31:09,729 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., hostname=c0a89b2656d4,41687,1731961836135, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T20:31:09,729 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., hostname=c0a89b2656d4,41687,1731961836135, seqNum=131 because the exception is null or not the one we care about 2024-11-18T20:31:09,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741871_1047 (size=20092) 2024-11-18T20:31:09,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741871_1047 (size=20092) 2024-11-18T20:31:09,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/88a840aaa8cc491d990d2b6791bdcaa2 2024-11-18T20:31:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/88a840aaa8cc491d990d2b6791bdcaa2 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/88a840aaa8cc491d990d2b6791bdcaa2 2024-11-18T20:31:09,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/88a840aaa8cc491d990d2b6791bdcaa2, entries=14, sequenceid=276, filesize=19.6 K 2024-11-18T20:31:09,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for a9216e802cfb16717ac903a33d9fdd30 in 65ms, sequenceid=276, compaction requested=true 2024-11-18T20:31:09,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:09,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a9216e802cfb16717ac903a33d9fdd30:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T20:31:09,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:09,751 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T20:31:09,753 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 173306 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-18T20:31:09,753 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): a9216e802cfb16717ac903a33d9fdd30/info is initiating minor compaction (all files) 2024-11-18T20:31:09,753 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a9216e802cfb16717ac903a33d9fdd30/info in TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:31:09,753 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a53df3236cc547bb8907bebd692c126f, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/7039b3c93fba42d1ab34a9d3c5275a37, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/88a840aaa8cc491d990d2b6791bdcaa2] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp, totalSize=169.2 K 2024-11-18T20:31:09,754 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting a53df3236cc547bb8907bebd692c126f, keycount=122, bloomtype=ROW, size=134.2 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731961849225 2024-11-18T20:31:09,754 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7039b3c93fba42d1ab34a9d3c5275a37, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1731961867629 2024-11-18T20:31:09,755 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting 88a840aaa8cc491d990d2b6791bdcaa2, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731961869651 2024-11-18T20:31:09,770 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a9216e802cfb16717ac903a33d9fdd30#info#compaction#85 average throughput is 49.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:31:09,771 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/fc1b3ae040ce4047a573169b3ceb425c is 1080, key is row0062/info:/1731961849225/Put/seqid=0 2024-11-18T20:31:09,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741872_1048 (size=163525) 2024-11-18T20:31:09,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741872_1048 (size=163525) 2024-11-18T20:31:09,794 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/fc1b3ae040ce4047a573169b3ceb425c as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fc1b3ae040ce4047a573169b3ceb425c 2024-11-18T20:31:09,803 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a9216e802cfb16717ac903a33d9fdd30/info of a9216e802cfb16717ac903a33d9fdd30 into fc1b3ae040ce4047a573169b3ceb425c(size=159.7 K), total size for store is 159.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:31:09,803 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:09,803 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., storeName=a9216e802cfb16717ac903a33d9fdd30/info, priority=13, startTime=1731961869751; duration=0sec 2024-11-18T20:31:09,803 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:09,803 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a9216e802cfb16717ac903a33d9fdd30:info 2024-11-18T20:31:09,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:09,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:10,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:10,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:11,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:11,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:12,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:12,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:13,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:13,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:14,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:14,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:15,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:15,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:16,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:16,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:17,547 INFO [master/c0a89b2656d4:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T20:31:17,547 INFO [master/c0a89b2656d4:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T20:31:17,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:17,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:18,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:18,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:19,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:19,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-18T20:31:19,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/f6c97360aaba402a8bc3d47a675c5bfe is 1080, key is row0208/info:/1731961869687/Put/seqid=0 2024-11-18T20:31:19,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741873_1049 (size=22254) 2024-11-18T20:31:19,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741873_1049 (size=22254) 2024-11-18T20:31:19,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/f6c97360aaba402a8bc3d47a675c5bfe 2024-11-18T20:31:19,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/f6c97360aaba402a8bc3d47a675c5bfe as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/f6c97360aaba402a8bc3d47a675c5bfe 2024-11-18T20:31:19,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/f6c97360aaba402a8bc3d47a675c5bfe, entries=16, sequenceid=296, filesize=21.7 K 2024-11-18T20:31:19,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=1.05 KB/1076 for a9216e802cfb16717ac903a33d9fdd30 in 23ms, sequenceid=296, compaction requested=false 2024-11-18T20:31:19,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:19,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:19,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:20,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:31:20,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:21,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:21,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T20:31:21,786 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/fa0dbedf730a464999a01ef3b196bc94 is 1080, key is row0224/info:/1731961879769/Put/seqid=0 2024-11-18T20:31:21,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741874_1050 (size=12523) 2024-11-18T20:31:21,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741874_1050 (size=12523) 2024-11-18T20:31:21,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41687 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-18T20:31:21,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41687 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:55320 deadline: 1731961891824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 2024-11-18T20:31:21,825 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., hostname=c0a89b2656d4,41687,1731961836135, seqNum=131 , the old value is region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., hostname=c0a89b2656d4,41687,1731961836135, seqNum=131, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T20:31:21,825 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., 
hostname=c0a89b2656d4,41687,1731961836135, seqNum=131 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=a9216e802cfb16717ac903a33d9fdd30, server=c0a89b2656d4,41687,1731961836135 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T20:31:21,825 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., hostname=c0a89b2656d4,41687,1731961836135, seqNum=131 because the exception is null or not the one we care about 2024-11-18T20:31:21,905 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-18T20:31:21,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:21,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T20:31:22,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/fa0dbedf730a464999a01ef3b196bc94
2024-11-18T20:31:22,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/fa0dbedf730a464999a01ef3b196bc94 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fa0dbedf730a464999a01ef3b196bc94
2024-11-18T20:31:22,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fa0dbedf730a464999a01ef3b196bc94, entries=7, sequenceid=306, filesize=12.2 K
2024-11-18T20:31:22,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for a9216e802cfb16717ac903a33d9fdd30 in 425ms, sequenceid=306, compaction requested=true
2024-11-18T20:31:22,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30:
2024-11-18T20:31:22,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a9216e802cfb16717ac903a33d9fdd30:info, priority=-2147483648, current under compaction store size is 1
2024-11-18T20:31:22,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-18T20:31:22,207 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-18T20:31:22,208 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 198302 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-18T20:31:22,208 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1541): a9216e802cfb16717ac903a33d9fdd30/info is initiating minor compaction (all files)
2024-11-18T20:31:22,209 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a9216e802cfb16717ac903a33d9fdd30/info in TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.
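Note: the flush and minor compaction recorded above were triggered automatically on the region server by MemStoreFlusher and CompactSplit. For readers who want to provoke an equivalent flush/compaction from a client while reproducing this test scenario, the following is a minimal sketch only; it assumes a reachable cluster, an hbase-site.xml on the classpath, and the standard hbase-client Admin API. The table name is copied from the log records above; nothing else in the sketch appears in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushCompact {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml on the classpath points at the cluster under test.
    Configuration conf = HBaseConfiguration.create();
    // Table name taken from the log records above.
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);   // client-side counterpart of the MemStoreFlusher flush logged above
      admin.compact(table); // requests a compaction like the one CompactSplit queued above
    }
  }
}

Admin.compact only requests that a compaction be scheduled; whether and when it runs is decided by the region server's compaction threads, which is what the compactionQueue/shortCompactions messages above reflect.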
2024-11-18T20:31:22,209 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fc1b3ae040ce4047a573169b3ceb425c, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/f6c97360aaba402a8bc3d47a675c5bfe, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fa0dbedf730a464999a01ef3b196bc94] into tmpdir=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp, totalSize=193.7 K
2024-11-18T20:31:22,209 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting fc1b3ae040ce4047a573169b3ceb425c, keycount=146, bloomtype=ROW, size=159.7 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731961849225
2024-11-18T20:31:22,209 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6c97360aaba402a8bc3d47a675c5bfe, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1731961869687
2024-11-18T20:31:22,210 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] compactions.Compactor(225): Compacting fa0dbedf730a464999a01ef3b196bc94, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1731961879769
2024-11-18T20:31:22,221 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a9216e802cfb16717ac903a33d9fdd30#info#compaction#88 average throughput is 57.81 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T20:31:22,222 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/19e87c98be5349f6bbfe238d177aa05f is 1080, key is row0062/info:/1731961849225/Put/seqid=0 2024-11-18T20:31:22,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741875_1051 (size=188452) 2024-11-18T20:31:22,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741875_1051 (size=188452) 2024-11-18T20:31:22,229 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/19e87c98be5349f6bbfe238d177aa05f as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/19e87c98be5349f6bbfe238d177aa05f 2024-11-18T20:31:22,234 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a9216e802cfb16717ac903a33d9fdd30/info of a9216e802cfb16717ac903a33d9fdd30 into 19e87c98be5349f6bbfe238d177aa05f(size=184.0 K), total size for store is 184.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T20:31:22,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:22,234 INFO [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30., storeName=a9216e802cfb16717ac903a33d9fdd30/info, priority=13, startTime=1731961882207; duration=0sec 2024-11-18T20:31:22,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T20:31:22,234 DEBUG [RS:0;c0a89b2656d4:41687-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a9216e802cfb16717ac903a33d9fdd30:info 2024-11-18T20:31:22,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:22,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:23,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:23,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:24,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:24,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:25,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T20:31:25,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:26,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:26,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:27,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:27,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:28,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:28,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:29,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:29,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:30,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:30,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:31,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41687 {}] regionserver.HRegion(8855): Flush requested on a9216e802cfb16717ac903a33d9fdd30
2024-11-18T20:31:31,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-11-18T20:31:31,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/6be0db48dc1c47c1bf0c0d53d9fbc6c5 is 1080, key is row0231/info:/1731961881784/Put/seqid=0
2024-11-18T20:31:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741876_1052 (size=29807)
2024-11-18T20:31:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741876_1052 (size=29807)
2024-11-18T20:31:31,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/6be0db48dc1c47c1bf0c0d53d9fbc6c5
2024-11-18T20:31:31,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/6be0db48dc1c47c1bf0c0d53d9fbc6c5 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/6be0db48dc1c47c1bf0c0d53d9fbc6c5
2024-11-18T20:31:31,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/6be0db48dc1c47c1bf0c0d53d9fbc6c5, entries=23, sequenceid=333, filesize=29.1 K
2024-11-18T20:31:31,891 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for a9216e802cfb16717ac903a33d9fdd30 in 24ms, sequenceid=333, compaction requested=false
2024-11-18T20:31:31,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30:
2024-11-18T20:31:31,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:31,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:32,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:32,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:33,875 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-18T20:31:33,876 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C41687%2C1731961836135.1731961893876 2024-11-18T20:31:33,887 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,887 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,887 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,887 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,888 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,888 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135/c0a89b2656d4%2C41687%2C1731961836135.1731961836510 with entries=315, filesize=309.33 KB; new WAL /user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135/c0a89b2656d4%2C41687%2C1731961836135.1731961893876 2024-11-18T20:31:33,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741833_1009 (size=316763) 2024-11-18T20:31:33,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741833_1009 (size=316763) 2024-11-18T20:31:33,893 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42695:42695),(127.0.0.1/127.0.0.1:45623:45623)] 2024-11-18T20:31:33,896 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 6353cb33c43f31f3407b72767b5eb67b: 2024-11-18T20:31:33,897 INFO [Time-limited 
test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-18T20:31:33,901 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/info/d9a28badcf3b401dbff3c46b6d5aefdb is 193, key is TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30./info:regioninfo/1731961852454/Put/seqid=0 2024-11-18T20:31:33,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741878_1054 (size=6223) 2024-11-18T20:31:33,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741878_1054 (size=6223) 2024-11-18T20:31:33,908 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/info/d9a28badcf3b401dbff3c46b6d5aefdb 2024-11-18T20:31:33,912 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/.tmp/info/d9a28badcf3b401dbff3c46b6d5aefdb as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/info/d9a28badcf3b401dbff3c46b6d5aefdb 2024-11-18T20:31:33,916 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/info/d9a28badcf3b401dbff3c46b6d5aefdb, entries=5, sequenceid=21, filesize=6.1 K 2024-11-18T20:31:33,917 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-18T20:31:33,917 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T20:31:33,918 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a9216e802cfb16717ac903a33d9fdd30 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-18T20:31:33,921 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/f51dc2b58759404fa6f1ea56e2ec7bc0 is 1080, key is row0254/info:/1731961891869/Put/seqid=0 2024-11-18T20:31:33,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741879_1055 (size=8199) 2024-11-18T20:31:33,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741879_1055 (size=8199) 2024-11-18T20:31:33,925 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/f51dc2b58759404fa6f1ea56e2ec7bc0 2024-11-18T20:31:33,931 DEBUG [Time-limited test {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/.tmp/info/f51dc2b58759404fa6f1ea56e2ec7bc0 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/f51dc2b58759404fa6f1ea56e2ec7bc0 2024-11-18T20:31:33,936 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/f51dc2b58759404fa6f1ea56e2ec7bc0, entries=3, sequenceid=339, filesize=8.0 K 2024-11-18T20:31:33,937 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a9216e802cfb16717ac903a33d9fdd30 in 20ms, sequenceid=339, compaction requested=true 2024-11-18T20:31:33,937 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a9216e802cfb16717ac903a33d9fdd30: 2024-11-18T20:31:33,937 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C41687%2C1731961836135.1731961893937 2024-11-18T20:31:33,941 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,941 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,941 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,941 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,942 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:33,942 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135/c0a89b2656d4%2C41687%2C1731961836135.1731961893876 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135/c0a89b2656d4%2C41687%2C1731961836135.1731961893937 2024-11-18T20:31:33,942 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45623:45623),(127.0.0.1/127.0.0.1:42695:42695)] 2024-11-18T20:31:33,942 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135/c0a89b2656d4%2C41687%2C1731961836135.1731961893876 is not closed yet, will try archiving it next time 2024-11-18T20:31:33,943 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135/c0a89b2656d4%2C41687%2C1731961836135.1731961836510 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/oldWALs/c0a89b2656d4%2C41687%2C1731961836135.1731961836510 2024-11-18T20:31:33,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741877_1053 (size=731) 2024-11-18T20:31:33,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741877_1053 (size=731) 2024-11-18T20:31:33,943 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 
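The flush sequence logged above (memstore flushed to an hfile under the region's .tmp directory, then "Committing .../.tmp/<hfile> as .../info/<hfile>") follows a write-to-temp-then-rename pattern, so readers only ever observe complete store files. The snippet below is a hypothetical illustration of that commit step using the plain Hadoop FileSystem API; it is not HBase's HRegionFileSystem code, and the names TmpCommitSketch and commit are invented for illustration.

// Hypothetical sketch of the commit step, assuming only the generic Hadoop FileSystem API.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class TmpCommitSketch {
  private TmpCommitSketch() {}

  static Path commit(FileSystem fs, Path tmpHFile, Path familyDir) throws IOException {
    Path dst = new Path(familyDir, tmpHFile.getName());
    // rename is a metadata-only move on HDFS, so the file appears in the family
    // directory in one step rather than being copied byte by byte
    if (!fs.rename(tmpHFile, dst)) {
      throw new IOException("Failed to commit " + tmpHFile + " as " + dst);
    }
    return dst;
  }
}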
2024-11-18T20:31:33,944 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/WALs/c0a89b2656d4,41687,1731961836135/c0a89b2656d4%2C41687%2C1731961836135.1731961893876 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/oldWALs/c0a89b2656d4%2C41687%2C1731961836135.1731961893876
2024-11-18T20:31:33,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:33,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null
2024-11-18T20:31:34,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-18T20:31:34,044 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
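The recurring "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above come from a reflective probe of DistributedFileSystem.isFileClosed made during WAL lease recovery; once the test's DFS client has been shut down, every probe fails the same way on each retry. The snippet below is a minimal, hypothetical sketch of such a reflective probe, not the actual RecoverLeaseFSUtils implementation; the class and method names IsFileClosedProbe and probe are invented for illustration.

// Hypothetical sketch: reflective isFileClosed probe against a FileSystem.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  private IsFileClosedProbe() {}

  /** Returns the probe result, or null if the probe could not be made. */
  static Boolean probe(FileSystem fs, Path wal) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return null; // this FileSystem implementation has no isFileClosed(Path)
    } catch (IllegalAccessException | InvocationTargetException e) {
      // e.getCause() would be the IOException("Filesystem closed") seen in the WARN entries
      return null;
    }
  }
}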
2024-11-18T20:31:34,044 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:31:34,044 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:34,044 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:34,044 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
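The "Call stack:" DEBUG entry above is produced by capturing the current thread's stack trace when the connection is closed, so the test log records which caller triggered the close. A minimal sketch of that kind of diagnostic, assuming nothing beyond java.lang.Thread and with an invented class name, is:

// Hypothetical sketch of a "Call stack:" diagnostic string builder.
final class CallStackSketch {
  private CallStackSketch() {}

  static String currentCallStack() {
    StringBuilder sb = new StringBuilder("Call stack:");
    for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
      sb.append(System.lineSeparator()).append("  at ").append(frame);
    }
    return sb.toString();
  }
}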
2024-11-18T20:31:34,044 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:31:34,044 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1103943411, stopped=false 2024-11-18T20:31:34,044 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c0a89b2656d4,45287,1731961836091 2024-11-18T20:31:34,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:31:34,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:31:34,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:34,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:34,045 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:31:34,046 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:31:34,046 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:31:34,046 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:34,046 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0a89b2656d4,41687,1731961836135' ***** 2024-11-18T20:31:34,046 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:31:34,046 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:31:34,046 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:31:34,046 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:31:34,046 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:31:34,046 INFO [RS:0;c0a89b2656d4:41687 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(3091): Received CLOSE for 6353cb33c43f31f3407b72767b5eb67b 2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(3091): Received CLOSE for a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(959): stopping server c0a89b2656d4,41687,1731961836135 2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c0a89b2656d4:41687. 2024-11-18T20:31:34,047 DEBUG [RS:0;c0a89b2656d4:41687 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:31:34,047 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6353cb33c43f31f3407b72767b5eb67b, disabling compactions & flushes 2024-11-18T20:31:34,047 DEBUG [RS:0;c0a89b2656d4:41687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:34,047 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 2024-11-18T20:31:34,047 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 2024-11-18T20:31:34,047 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. after waiting 0 ms 2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:31:34,047 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 
2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:31:34,047 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-18T20:31:34,047 DEBUG [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(1325): Online Regions={6353cb33c43f31f3407b72767b5eb67b=TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b., 1588230740=hbase:meta,,1.1588230740, a9216e802cfb16717ac903a33d9fdd30=TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.} 2024-11-18T20:31:34,047 DEBUG [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6353cb33c43f31f3407b72767b5eb67b, a9216e802cfb16717ac903a33d9fdd30 2024-11-18T20:31:34,047 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:31:34,047 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:31:34,047 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:31:34,047 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:31:34,047 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:31:34,047 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49->hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/a682cfef5fc446b39a6d956b23c7be0b-bottom] to archive 2024-11-18T20:31:34,048 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T20:31:34,050 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49 2024-11-18T20:31:34,050 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c0a89b2656d4:45287 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-18T20:31:34,051 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-18T20:31:34,052 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-18T20:31:34,052 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:31:34,052 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:31:34,052 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961894047Running coprocessor pre-close hooks at 1731961894047Disabling compacts and flushes for region at 1731961894047Disabling writes for close at 1731961894047Writing region close event to WAL at 1731961894048 (+1 ms)Running coprocessor post-close hooks at 1731961894052 (+4 ms)Closed at 1731961894052 2024-11-18T20:31:34,052 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:31:34,054 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/6353cb33c43f31f3407b72767b5eb67b/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-18T20:31:34,055 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 
2024-11-18T20:31:34,055 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6353cb33c43f31f3407b72767b5eb67b: Waiting for close lock at 1731961894047Running coprocessor pre-close hooks at 1731961894047Disabling compacts and flushes for region at 1731961894047Disabling writes for close at 1731961894047Writing region close event to WAL at 1731961894051 (+4 ms)Running coprocessor post-close hooks at 1731961894055 (+4 ms)Closed at 1731961894055 2024-11-18T20:31:34,055 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731961851327.6353cb33c43f31f3407b72767b5eb67b. 2024-11-18T20:31:34,055 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a9216e802cfb16717ac903a33d9fdd30, disabling compactions & flushes 2024-11-18T20:31:34,055 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:31:34,055 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:31:34,055 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. after waiting 0 ms 2024-11-18T20:31:34,055 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 
2024-11-18T20:31:34,055 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49->hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/df10b4746fa897739653c5eccd447f49/info/a682cfef5fc446b39a6d956b23c7be0b-top, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-de377292440942d79701f530ae2dbbed, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1e8a9820b88f40f7aa9feb70160b0eb3, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-b9293318b19d46e0b5de06c73dfa15b9, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/83a88ac56fdc48a2adbd73e00d68647c, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/6ccc7b88d05b4a5085ea2fcc2339f8bd, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1ce24515e583473481c5e41642b51f69, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/e2266783f2f6480ca9719eed3913b848, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/aec18a528361410fa63fd8e17c32d06f, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/bb861a34623c45a485fc651216a63ac0, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/ad91587a97564bd79762d44dd15e448b, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/2fac507398244f0f9d5ed4142add73f5, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/0e21e528c67b4683ad146b1dcc3335b1, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/d98ca75a86c04f51aa999bff644ba8b2, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a53df3236cc547bb8907bebd692c126f, 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/baad7147ac074c2cbf35b115730eb32f, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/7039b3c93fba42d1ab34a9d3c5275a37, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fc1b3ae040ce4047a573169b3ceb425c, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/88a840aaa8cc491d990d2b6791bdcaa2, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/f6c97360aaba402a8bc3d47a675c5bfe, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fa0dbedf730a464999a01ef3b196bc94] to archive 2024-11-18T20:31:34,056 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T20:31:34,058 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a682cfef5fc446b39a6d956b23c7be0b.df10b4746fa897739653c5eccd447f49 2024-11-18T20:31:34,059 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-de377292440942d79701f530ae2dbbed to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-de377292440942d79701f530ae2dbbed 2024-11-18T20:31:34,060 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1e8a9820b88f40f7aa9feb70160b0eb3 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1e8a9820b88f40f7aa9feb70160b0eb3 2024-11-18T20:31:34,061 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived 
from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-b9293318b19d46e0b5de06c73dfa15b9 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/TestLogRolling-testLogRolling=df10b4746fa897739653c5eccd447f49-b9293318b19d46e0b5de06c73dfa15b9 2024-11-18T20:31:34,062 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/83a88ac56fdc48a2adbd73e00d68647c to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/83a88ac56fdc48a2adbd73e00d68647c 2024-11-18T20:31:34,063 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/6ccc7b88d05b4a5085ea2fcc2339f8bd to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/6ccc7b88d05b4a5085ea2fcc2339f8bd 2024-11-18T20:31:34,064 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1ce24515e583473481c5e41642b51f69 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/1ce24515e583473481c5e41642b51f69 2024-11-18T20:31:34,065 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/e2266783f2f6480ca9719eed3913b848 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/e2266783f2f6480ca9719eed3913b848 2024-11-18T20:31:34,066 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/aec18a528361410fa63fd8e17c32d06f to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/aec18a528361410fa63fd8e17c32d06f 
2024-11-18T20:31:34,067 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/bb861a34623c45a485fc651216a63ac0 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/bb861a34623c45a485fc651216a63ac0 2024-11-18T20:31:34,068 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/ad91587a97564bd79762d44dd15e448b to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/ad91587a97564bd79762d44dd15e448b 2024-11-18T20:31:34,069 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/2fac507398244f0f9d5ed4142add73f5 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/2fac507398244f0f9d5ed4142add73f5 2024-11-18T20:31:34,070 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/0e21e528c67b4683ad146b1dcc3335b1 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/0e21e528c67b4683ad146b1dcc3335b1 2024-11-18T20:31:34,071 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/d98ca75a86c04f51aa999bff644ba8b2 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/d98ca75a86c04f51aa999bff644ba8b2 2024-11-18T20:31:34,073 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a53df3236cc547bb8907bebd692c126f to 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/a53df3236cc547bb8907bebd692c126f 2024-11-18T20:31:34,074 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/baad7147ac074c2cbf35b115730eb32f to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/baad7147ac074c2cbf35b115730eb32f 2024-11-18T20:31:34,075 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/7039b3c93fba42d1ab34a9d3c5275a37 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/7039b3c93fba42d1ab34a9d3c5275a37 2024-11-18T20:31:34,076 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fc1b3ae040ce4047a573169b3ceb425c to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fc1b3ae040ce4047a573169b3ceb425c 2024-11-18T20:31:34,077 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/88a840aaa8cc491d990d2b6791bdcaa2 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/88a840aaa8cc491d990d2b6791bdcaa2 2024-11-18T20:31:34,077 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/f6c97360aaba402a8bc3d47a675c5bfe to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/f6c97360aaba402a8bc3d47a675c5bfe 2024-11-18T20:31:34,078 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fa0dbedf730a464999a01ef3b196bc94 to hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/archive/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/info/fa0dbedf730a464999a01ef3b196bc94 2024-11-18T20:31:34,078 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1e8a9820b88f40f7aa9feb70160b0eb3=42984, 83a88ac56fdc48a2adbd73e00d68647c=12516, 6ccc7b88d05b4a5085ea2fcc2339f8bd=63636, 1ce24515e583473481c5e41642b51f69=17906, e2266783f2f6480ca9719eed3913b848=20078, aec18a528361410fa63fd8e17c32d06f=91843, bb861a34623c45a485fc651216a63ac0=17906, ad91587a97564bd79762d44dd15e448b=17906, 2fac507398244f0f9d5ed4142add73f5=113509, 0e21e528c67b4683ad146b1dcc3335b1=13594, d98ca75a86c04f51aa999bff644ba8b2=17906, a53df3236cc547bb8907bebd692c126f=137463, baad7147ac074c2cbf35b115730eb32f=15750, 7039b3c93fba42d1ab34a9d3c5275a37=15751, fc1b3ae040ce4047a573169b3ceb425c=163525, 88a840aaa8cc491d990d2b6791bdcaa2=20092, f6c97360aaba402a8bc3d47a675c5bfe=22254, fa0dbedf730a464999a01ef3b196bc94=12523] 2024-11-18T20:31:34,082 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/data/default/TestLogRolling-testLogRolling/a9216e802cfb16717ac903a33d9fdd30/recovered.edits/342.seqid, newMaxSeqId=342, maxSeqId=130 2024-11-18T20:31:34,082 INFO [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:31:34,082 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a9216e802cfb16717ac903a33d9fdd30: Waiting for close lock at 1731961894055Running coprocessor pre-close hooks at 1731961894055Disabling compacts and flushes for region at 1731961894055Disabling writes for close at 1731961894055Writing region close event to WAL at 1731961894079 (+24 ms)Running coprocessor post-close hooks at 1731961894082 (+3 ms)Closed at 1731961894082 2024-11-18T20:31:34,082 DEBUG [RS_CLOSE_REGION-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731961851327.a9216e802cfb16717ac903a33d9fdd30. 2024-11-18T20:31:34,247 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(976): stopping server c0a89b2656d4,41687,1731961836135; all regions closed. 
2024-11-18T20:31:34,248 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,248 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,248 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,248 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,248 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741834_1010 (size=8107) 2024-11-18T20:31:34,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741834_1010 (size=8107) 2024-11-18T20:31:34,252 DEBUG [RS:0;c0a89b2656d4:41687 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/oldWALs 2024-11-18T20:31:34,252 INFO [RS:0;c0a89b2656d4:41687 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C41687%2C1731961836135.meta:.meta(num 1731961836897) 2024-11-18T20:31:34,252 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,253 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,253 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,253 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,253 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741880_1056 (size=780) 2024-11-18T20:31:34,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741880_1056 (size=780) 2024-11-18T20:31:34,256 DEBUG [RS:0;c0a89b2656d4:41687 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/oldWALs 2024-11-18T20:31:34,256 INFO [RS:0;c0a89b2656d4:41687 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C41687%2C1731961836135:(num 1731961893937) 2024-11-18T20:31:34,256 DEBUG [RS:0;c0a89b2656d4:41687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:34,256 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:31:34,256 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:31:34,256 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.ChoreService(370): Chore service for: regionserver/c0a89b2656d4:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:31:34,256 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:31:34,257 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T20:31:34,257 INFO [RS:0;c0a89b2656d4:41687 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41687 2024-11-18T20:31:34,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:31:34,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0a89b2656d4,41687,1731961836135 2024-11-18T20:31:34,258 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:31:34,259 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0a89b2656d4,41687,1731961836135] 2024-11-18T20:31:34,259 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0a89b2656d4,41687,1731961836135 already deleted, retry=false 2024-11-18T20:31:34,259 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0a89b2656d4,41687,1731961836135 expired; onlineServers=0 2024-11-18T20:31:34,259 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c0a89b2656d4,45287,1731961836091' ***** 2024-11-18T20:31:34,259 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:31:34,259 INFO [M:0;c0a89b2656d4:45287 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:31:34,259 INFO [M:0;c0a89b2656d4:45287 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:31:34,260 DEBUG [M:0;c0a89b2656d4:45287 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:31:34,260 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T20:31:34,260 DEBUG [M:0;c0a89b2656d4:45287 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:31:34,260 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961836279 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961836279,5,FailOnTimeoutGroup] 2024-11-18T20:31:34,260 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961836279 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961836279,5,FailOnTimeoutGroup] 2024-11-18T20:31:34,260 INFO [M:0;c0a89b2656d4:45287 {}] hbase.ChoreService(370): Chore service for: master/c0a89b2656d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:31:34,260 INFO [M:0;c0a89b2656d4:45287 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:31:34,260 DEBUG [M:0;c0a89b2656d4:45287 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:31:34,260 INFO [M:0;c0a89b2656d4:45287 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:31:34,260 INFO [M:0;c0a89b2656d4:45287 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:31:34,260 INFO [M:0;c0a89b2656d4:45287 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:31:34,260 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:31:34,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:31:34,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:34,261 DEBUG [M:0;c0a89b2656d4:45287 {}] zookeeper.ZKUtil(347): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:31:34,261 WARN [M:0;c0a89b2656d4:45287 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:31:34,261 INFO [M:0;c0a89b2656d4:45287 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/.lastflushedseqids 2024-11-18T20:31:34,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741881_1057 (size=228) 2024-11-18T20:31:34,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741881_1057 (size=228) 2024-11-18T20:31:34,266 INFO [M:0;c0a89b2656d4:45287 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:31:34,266 INFO [M:0;c0a89b2656d4:45287 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:31:34,267 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:31:34,267 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:34,267 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:34,267 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:31:34,267 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:34,267 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.36 KB 2024-11-18T20:31:34,281 DEBUG [M:0;c0a89b2656d4:45287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/39e59921011d418d932c938a0ebb262d is 82, key is hbase:meta,,1/info:regioninfo/1731961836920/Put/seqid=0 2024-11-18T20:31:34,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741882_1058 (size=5672) 2024-11-18T20:31:34,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741882_1058 (size=5672) 2024-11-18T20:31:34,286 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/39e59921011d418d932c938a0ebb262d 2024-11-18T20:31:34,311 DEBUG [M:0;c0a89b2656d4:45287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/db400469f1cd45f1a1e03081e4b327e4 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731961837323/Put/seqid=0 2024-11-18T20:31:34,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741883_1059 (size=7089) 2024-11-18T20:31:34,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741883_1059 (size=7089) 2024-11-18T20:31:34,316 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/db400469f1cd45f1a1e03081e4b327e4 2024-11-18T20:31:34,320 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for db400469f1cd45f1a1e03081e4b327e4 2024-11-18T20:31:34,338 DEBUG [M:0;c0a89b2656d4:45287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2d6ed30e5d144663986e24883c8e8660 is 69, key is c0a89b2656d4,41687,1731961836135/rs:state/1731961836366/Put/seqid=0 2024-11-18T20:31:34,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741884_1060 (size=5156) 2024-11-18T20:31:34,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741884_1060 (size=5156) 2024-11-18T20:31:34,343 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2d6ed30e5d144663986e24883c8e8660 2024-11-18T20:31:34,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:31:34,359 INFO [RS:0;c0a89b2656d4:41687 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:31:34,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41687-0x1005487ae9a0001, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:31:34,359 INFO [RS:0;c0a89b2656d4:41687 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0a89b2656d4,41687,1731961836135; zookeeper connection closed. 2024-11-18T20:31:34,359 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3fb134e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3fb134e 2024-11-18T20:31:34,359 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:31:34,366 DEBUG [M:0;c0a89b2656d4:45287 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/31e7d505031f491b997b9c405317b64b is 52, key is load_balancer_on/state:d/1731961836957/Put/seqid=0 2024-11-18T20:31:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741885_1061 (size=5056) 2024-11-18T20:31:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741885_1061 (size=5056) 2024-11-18T20:31:34,370 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/31e7d505031f491b997b9c405317b64b 2024-11-18T20:31:34,374 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/39e59921011d418d932c938a0ebb262d as 
hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/39e59921011d418d932c938a0ebb262d 2024-11-18T20:31:34,378 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/39e59921011d418d932c938a0ebb262d, entries=8, sequenceid=125, filesize=5.5 K 2024-11-18T20:31:34,379 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/db400469f1cd45f1a1e03081e4b327e4 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/db400469f1cd45f1a1e03081e4b327e4 2024-11-18T20:31:34,381 INFO [regionserver/c0a89b2656d4:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:31:34,383 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for db400469f1cd45f1a1e03081e4b327e4 2024-11-18T20:31:34,383 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/db400469f1cd45f1a1e03081e4b327e4, entries=13, sequenceid=125, filesize=6.9 K 2024-11-18T20:31:34,384 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2d6ed30e5d144663986e24883c8e8660 as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2d6ed30e5d144663986e24883c8e8660 2024-11-18T20:31:34,387 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2d6ed30e5d144663986e24883c8e8660, entries=1, sequenceid=125, filesize=5.0 K 2024-11-18T20:31:34,388 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/31e7d505031f491b997b9c405317b64b as hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/31e7d505031f491b997b9c405317b64b 2024-11-18T20:31:34,392 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46871/user/jenkins/test-data/2c779fe6-8169-c047-9f5c-77ab7f18e77b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/31e7d505031f491b997b9c405317b64b, entries=1, sequenceid=125, filesize=4.9 K 2024-11-18T20:31:34,393 INFO [M:0;c0a89b2656d4:45287 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false 2024-11-18T20:31:34,394 INFO 
[M:0;c0a89b2656d4:45287 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:34,394 DEBUG [M:0;c0a89b2656d4:45287 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961894267Disabling compacts and flushes for region at 1731961894267Disabling writes for close at 1731961894267Obtaining lock to block concurrent updates at 1731961894267Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961894267Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731961894267Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731961894268 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961894268Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961894281 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961894281Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961894290 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961894311 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961894311Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961894320 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961894338 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961894338Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961894348 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961894365 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961894365Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69ec5541: reopening flushed file at 1731961894373 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50f48305: reopening flushed file at 1731961894378 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73819013: reopening flushed file at 1731961894383 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7752b46a: reopening flushed file at 1731961894388 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=125, compaction requested=false at 1731961894393 (+5 ms)Writing region close event to WAL at 1731961894394 (+1 ms)Closed at 1731961894394 2024-11-18T20:31:34,394 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,395 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,395 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,395 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,395 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:34,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34591 is added to blk_1073741830_1006 (size=61320) 2024-11-18T20:31:34,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33451 is added to blk_1073741830_1006 (size=61320) 2024-11-18T20:31:34,397 INFO [M:0;c0a89b2656d4:45287 {}] flush.MasterFlushTableProcedureManager(90): 
stop: server shutting down. 2024-11-18T20:31:34,397 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:31:34,397 INFO [M:0;c0a89b2656d4:45287 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45287 2024-11-18T20:31:34,397 INFO [M:0;c0a89b2656d4:45287 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:31:34,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:31:34,499 INFO [M:0;c0a89b2656d4:45287 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:31:34,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45287-0x1005487ae9a0000, quorum=127.0.0.1:61530, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:31:34,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4309be89{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:31:34,504 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@762ca6ce{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:31:34,504 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:31:34,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dadaa88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:31:34,505 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@695df454{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.log.dir/,STOPPED} 2024-11-18T20:31:34,506 WARN [BP-1431792127-172.17.0.2-1731961835520 heartbeating to localhost/127.0.0.1:46871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:31:34,506 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:31:34,506 WARN [BP-1431792127-172.17.0.2-1731961835520 heartbeating to localhost/127.0.0.1:46871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1431792127-172.17.0.2-1731961835520 (Datanode Uuid cc59418d-b16f-4489-ba2b-917c54230618) service to localhost/127.0.0.1:46871 2024-11-18T20:31:34,506 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:31:34,507 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/data/data3/current/BP-1431792127-172.17.0.2-1731961835520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:31:34,507 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/data/data4/current/BP-1431792127-172.17.0.2-1731961835520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:31:34,507 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:31:34,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53e82728{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:31:34,510 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7223bbba{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:31:34,510 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:31:34,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1634e03e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:31:34,510 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1cdb7164{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.log.dir/,STOPPED} 2024-11-18T20:31:34,511 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:31:34,511 WARN [BP-1431792127-172.17.0.2-1731961835520 heartbeating to localhost/127.0.0.1:46871 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:31:34,511 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:31:34,511 WARN [BP-1431792127-172.17.0.2-1731961835520 heartbeating to localhost/127.0.0.1:46871 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1431792127-172.17.0.2-1731961835520 (Datanode Uuid a2f5eb84-5012-4a39-8106-4e48d22110fa) service to localhost/127.0.0.1:46871 2024-11-18T20:31:34,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/data/data1/current/BP-1431792127-172.17.0.2-1731961835520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:31:34,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/cluster_6ef2688e-811a-bb29-925e-e529383d5866/data/data2/current/BP-1431792127-172.17.0.2-1731961835520 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:31:34,512 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:31:34,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@643a16ca{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:31:34,519 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ed3b4c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:31:34,519 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:31:34,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f1e6498{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:31:34,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59e63a8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.log.dir/,STOPPED} 2024-11-18T20:31:34,527 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:31:34,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:31:34,567 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 206) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:46871 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for 
localhost/127.0.0.1:46871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46871 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:46871 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=503 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=85 (was 22) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2258 (was 2457) 2024-11-18T20:31:34,575 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=503, MaxFileDescriptor=1048576, SystemLoadAverage=85, ProcessCount=11, AvailableMemoryMB=2258 2024-11-18T20:31:34,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T20:31:34,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.log.dir so I do NOT create it in target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9 2024-11-18T20:31:34,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/30c14627-f0f7-0148-e486-16c1205bb97c/hadoop.tmp.dir so I do NOT create it in target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9 2024-11-18T20:31:34,575 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467, deleteOnExit=true 2024-11-18T20:31:34,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T20:31:34,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/test.cache.data in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/hadoop.log.dir in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T20:31:34,576 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T20:31:34,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T20:31:34,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T20:31:34,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:31:34,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T20:31:34,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/nfs.dump.dir in system properties and HBase conf 2024-11-18T20:31:34,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/java.io.tmpdir in system properties and HBase conf 2024-11-18T20:31:34,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T20:31:34,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T20:31:34,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T20:31:34,590 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:31:34,628 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:31:34,632 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:31:34,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:31:34,633 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:31:34,633 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:31:34,634 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:31:34,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1141999a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:31:34,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c53e0a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:31:34,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b9e214c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/java.io.tmpdir/jetty-localhost-45129-hadoop-hdfs-3_4_1-tests_jar-_-any-12800233176859047064/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:31:34,726 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ff58c0c{HTTP/1.1, (http/1.1)}{localhost:45129} 2024-11-18T20:31:34,726 INFO [Time-limited test {}] server.Server(415): Started @293422ms 2024-11-18T20:31:34,737 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T20:31:34,772 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:31:34,775 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:31:34,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:31:34,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:31:34,776 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T20:31:34,776 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37916527{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:31:34,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11e682be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:31:34,869 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39034b65{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/java.io.tmpdir/jetty-localhost-33029-hadoop-hdfs-3_4_1-tests_jar-_-any-4338506024788376738/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:31:34,870 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d7fa30b{HTTP/1.1, (http/1.1)}{localhost:33029} 2024-11-18T20:31:34,870 INFO [Time-limited test {}] server.Server(415): Started @293565ms 2024-11-18T20:31:34,871 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:31:34,894 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T20:31:34,897 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T20:31:34,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T20:31:34,897 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T20:31:34,897 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T20:31:34,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a25273e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/hadoop.log.dir/,AVAILABLE} 2024-11-18T20:31:34,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bd30c09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T20:31:34,929 WARN [Thread-2485 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/data/data1/current/BP-1285727592-172.17.0.2-1731961894593/current, will proceed with Du for space computation calculation, 2024-11-18T20:31:34,929 WARN [Thread-2486 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/data/data2/current/BP-1285727592-172.17.0.2-1731961894593/current, will proceed with Du for space computation calculation, 2024-11-18T20:31:34,947 WARN [Thread-2464 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:31:34,949 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ae86655508ee9c5 with lease ID 0xb51b06ab14067eca: Processing first storage report for DS-39b4b989-65b5-4f51-8cff-636cc8f6ebf1 from datanode DatanodeRegistration(127.0.0.1:43149, datanodeUuid=0d17b0d0-f33b-41ab-bab8-64bc45432e9e, infoPort=39933, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=1370867202;c=1731961894593) 2024-11-18T20:31:34,949 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ae86655508ee9c5 with lease ID 0xb51b06ab14067eca: from storage DS-39b4b989-65b5-4f51-8cff-636cc8f6ebf1 node DatanodeRegistration(127.0.0.1:43149, datanodeUuid=0d17b0d0-f33b-41ab-bab8-64bc45432e9e, infoPort=39933, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=1370867202;c=1731961894593), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:31:34,949 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ae86655508ee9c5 with lease ID 0xb51b06ab14067eca: Processing first storage report for DS-93d7a6d3-4fa1-440b-a156-67484fbc0658 from datanode DatanodeRegistration(127.0.0.1:43149, datanodeUuid=0d17b0d0-f33b-41ab-bab8-64bc45432e9e, infoPort=39933, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=1370867202;c=1731961894593) 2024-11-18T20:31:34,949 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ae86655508ee9c5 with lease ID 0xb51b06ab14067eca: from storage DS-93d7a6d3-4fa1-440b-a156-67484fbc0658 node DatanodeRegistration(127.0.0.1:43149, datanodeUuid=0d17b0d0-f33b-41ab-bab8-64bc45432e9e, infoPort=39933, infoSecurePort=0, ipcPort=38689, storageInfo=lv=-57;cid=testClusterID;nsid=1370867202;c=1731961894593), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:31:34,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:34,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:31:35,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16a48457{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/java.io.tmpdir/jetty-localhost-38811-hadoop-hdfs-3_4_1-tests_jar-_-any-12886768503719009371/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:31:35,007 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50a67c65{HTTP/1.1, (http/1.1)}{localhost:38811} 2024-11-18T20:31:35,007 INFO [Time-limited test {}] server.Server(415): Started @293703ms 2024-11-18T20:31:35,008 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T20:31:35,064 WARN [Thread-2511 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/data/data3/current/BP-1285727592-172.17.0.2-1731961894593/current, will proceed with Du for space computation calculation, 2024-11-18T20:31:35,064 WARN [Thread-2512 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/data/data4/current/BP-1285727592-172.17.0.2-1731961894593/current, will proceed with Du for space computation calculation, 2024-11-18T20:31:35,080 WARN [Thread-2500 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T20:31:35,082 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff03d3aefb8438ab with lease ID 0xb51b06ab14067ecb: Processing first storage report for DS-a26d5724-212f-45bd-900f-278dee347e01 from datanode DatanodeRegistration(127.0.0.1:40527, datanodeUuid=e9f2a205-3fd9-4e3d-86a9-284686877b7a, infoPort=35031, infoSecurePort=0, ipcPort=34543, storageInfo=lv=-57;cid=testClusterID;nsid=1370867202;c=1731961894593) 2024-11-18T20:31:35,082 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff03d3aefb8438ab with lease ID 0xb51b06ab14067ecb: from storage DS-a26d5724-212f-45bd-900f-278dee347e01 node DatanodeRegistration(127.0.0.1:40527, datanodeUuid=e9f2a205-3fd9-4e3d-86a9-284686877b7a, infoPort=35031, infoSecurePort=0, ipcPort=34543, storageInfo=lv=-57;cid=testClusterID;nsid=1370867202;c=1731961894593), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:31:35,082 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xff03d3aefb8438ab with lease ID 0xb51b06ab14067ecb: Processing first storage report for DS-f595b85a-082e-4816-b106-7e5cc0e232d8 from datanode DatanodeRegistration(127.0.0.1:40527, datanodeUuid=e9f2a205-3fd9-4e3d-86a9-284686877b7a, infoPort=35031, infoSecurePort=0, ipcPort=34543, storageInfo=lv=-57;cid=testClusterID;nsid=1370867202;c=1731961894593) 2024-11-18T20:31:35,083 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xff03d3aefb8438ab with lease ID 0xb51b06ab14067ecb: from storage DS-f595b85a-082e-4816-b106-7e5cc0e232d8 node DatanodeRegistration(127.0.0.1:40527, datanodeUuid=e9f2a205-3fd9-4e3d-86a9-284686877b7a, infoPort=35031, infoSecurePort=0, ipcPort=34543, storageInfo=lv=-57;cid=testClusterID;nsid=1370867202;c=1731961894593), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T20:31:35,128 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9 2024-11-18T20:31:35,132 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/zookeeper_0, clientPort=60185, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T20:31:35,132 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60185 2024-11-18T20:31:35,133 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:31:35,134 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:31:35,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:31:35,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741825_1001 (size=7) 2024-11-18T20:31:35,145 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a with version=8 2024-11-18T20:31:35,145 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:35383/user/jenkins/test-data/3e41fdad-60e9-7fc6-70f7-efda04b8c094/hbase-staging 2024-11-18T20:31:35,147 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:31:35,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:31:35,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:31:35,148 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:31:35,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:31:35,148 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:31:35,148 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T20:31:35,148 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:31:35,149 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46075 2024-11-18T20:31:35,150 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46075 connecting to ZooKeeper ensemble=127.0.0.1:60185 2024-11-18T20:31:35,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:460750x0, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:31:35,154 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46075-0x100548895490000 connected 2024-11-18T20:31:35,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:31:35,168 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:31:35,169 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:31:35,170 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a, hbase.cluster.distributed=false 2024-11-18T20:31:35,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:31:35,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46075 2024-11-18T20:31:35,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46075 2024-11-18T20:31:35,177 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46075 2024-11-18T20:31:35,177 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46075 2024-11-18T20:31:35,177 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46075 2024-11-18T20:31:35,190 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c0a89b2656d4:0 server-side Connection retries=45 2024-11-18T20:31:35,190 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:31:35,190 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T20:31:35,190 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T20:31:35,190 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T20:31:35,190 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T20:31:35,190 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T20:31:35,190 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T20:31:35,191 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39421 2024-11-18T20:31:35,192 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39421 connecting to ZooKeeper ensemble=127.0.0.1:60185 2024-11-18T20:31:35,192 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:31:35,193 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:31:35,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:394210x0, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T20:31:35,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:31:35,197 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39421-0x100548895490001 connected 2024-11-18T20:31:35,197 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T20:31:35,197 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T20:31:35,198 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T20:31:35,198 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T20:31:35,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39421 2024-11-18T20:31:35,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39421 2024-11-18T20:31:35,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39421 2024-11-18T20:31:35,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39421 2024-11-18T20:31:35,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39421 2024-11-18T20:31:35,214 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c0a89b2656d4:46075 2024-11-18T20:31:35,214 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c0a89b2656d4,46075,1731961895147 2024-11-18T20:31:35,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:31:35,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:31:35,215 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46075-0x100548895490000, quorum=127.0.0.1:60185, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c0a89b2656d4,46075,1731961895147 2024-11-18T20:31:35,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T20:31:35,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,217 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T20:31:35,217 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c0a89b2656d4,46075,1731961895147 from backup master directory 2024-11-18T20:31:35,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c0a89b2656d4,46075,1731961895147 2024-11-18T20:31:35,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:31:35,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T20:31:35,217 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
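Aside (illustrative only, not part of the captured output): the watcher traffic above is all against znodes under the /hbase baseZNode on the 127.0.0.1:60185 quorum (/hbase/master, /hbase/backup-masters, /hbase/running, /hbase/acl). A minimal sketch of inspecting those znodes with the stock ZooKeeper client follows; the quorum address and znode paths come from the log, while the 30-second session timeout, the no-op watcher and the class name are arbitrary choices for the sketch, and it assumes the mini-cluster is still running.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class InspectHBaseZNodes {
        public static void main(String[] args) throws Exception {
            // Quorum as reported in the log; 30s session timeout and no-op watcher are arbitrary.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60185", 30000, (WatchedEvent e) -> { });
            try {
                // List the children of the baseZNode (e.g. master, backup-masters, rs, running, acl).
                List<String> children = zk.getChildren("/hbase", false);
                System.out.println("znodes under /hbase: " + children);
                // The active-master znode holds serialized master address data.
                byte[] master = zk.getData("/hbase/master", false, null);
                System.out.println("/hbase/master payload bytes: " + (master == null ? 0 : master.length));
            } finally {
                zk.close();
            }
        }
    }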
2024-11-18T20:31:35,217 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c0a89b2656d4,46075,1731961895147 2024-11-18T20:31:35,220 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/hbase.id] with ID: 9d02266c-3627-48c3-ad38-e10e89867139 2024-11-18T20:31:35,220 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/.tmp/hbase.id 2024-11-18T20:31:35,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:31:35,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741826_1002 (size=42) 2024-11-18T20:31:35,226 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/.tmp/hbase.id]:[hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/hbase.id] 2024-11-18T20:31:35,236 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:31:35,236 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T20:31:35,237 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
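Aside (illustrative only): the two FSUtils entries above write the cluster ID to .tmp/hbase.id and then move it to hbase.id, i.e. a write-to-temp-then-rename pattern. A rough sketch of that pattern with the Hadoop FileSystem API follows; the NameNode URI, paths and cluster ID string are copied from the log, the helper name is invented, and the real hbase.id holds a serialized ClusterId (hence size=42 above) rather than the bare UUID text used here.

    import java.io.IOException;
    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteClusterIdSketch {
        // Hypothetical helper: write content to a temp file, then rename it into place.
        static void writeThenRename(FileSystem fs, Path tmp, Path target, byte[] content) throws IOException {
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.write(content);
            }
            if (!fs.rename(tmp, target)) {
                throw new IOException("rename " + tmp + " -> " + target + " failed");
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34531"), conf);
            Path tmp = new Path("/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/.tmp/hbase.id");
            Path target = new Path("/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/hbase.id");
            writeThenRename(fs, tmp, target,
                "9d02266c-3627-48c3-ad38-e10e89867139".getBytes(StandardCharsets.UTF_8));
        }
    }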
2024-11-18T20:31:35,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:31:35,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741827_1003 (size=196) 2024-11-18T20:31:35,244 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T20:31:35,245 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T20:31:35,245 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:31:35,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:31:35,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741828_1004 (size=1189) 2024-11-18T20:31:35,252 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store 2024-11-18T20:31:35,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:31:35,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741829_1005 (size=34) 2024-11-18T20:31:35,258 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:31:35,258 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:31:35,258 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:35,258 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:35,258 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:31:35,258 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:35,258 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
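Aside (illustrative only): the 'master:store' descriptor logged above spells out per-family settings such as VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE and IN_MEMORY. The sketch below shows how the logged 'info' family settings map onto the public descriptor builders; the table name 'demo_store' is invented, and this is not the MasterRegion bootstrap code itself.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the 'info' family shown in the log: 3 versions, ROWCOL bloom filter,
            // ROW_INDEX_V1 block encoding, 8 KB blocks, kept in-memory.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .setInMemory(true)
                .build();
            // Hypothetical table name, used only so the sketch is self-contained.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_store"))
                .setColumnFamily(info)
                .build();
        }
    }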
2024-11-18T20:31:35,258 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961895258Disabling compacts and flushes for region at 1731961895258Disabling writes for close at 1731961895258Writing region close event to WAL at 1731961895258Closed at 1731961895258 2024-11-18T20:31:35,259 WARN [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/.initializing 2024-11-18T20:31:35,259 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/WALs/c0a89b2656d4,46075,1731961895147 2024-11-18T20:31:35,262 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C46075%2C1731961895147, suffix=, logDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/WALs/c0a89b2656d4,46075,1731961895147, archiveDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/oldWALs, maxLogs=10 2024-11-18T20:31:35,262 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C46075%2C1731961895147.1731961895262 2024-11-18T20:31:35,267 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/WALs/c0a89b2656d4,46075,1731961895147/c0a89b2656d4%2C46075%2C1731961895147.1731961895262 2024-11-18T20:31:35,269 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39933:39933),(127.0.0.1/127.0.0.1:35031:35031)] 2024-11-18T20:31:35,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:31:35,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:31:35,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,276 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T20:31:35,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:31:35,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T20:31:35,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:31:35,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T20:31:35,283 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:31:35,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T20:31:35,285 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,286 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T20:31:35,286 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,287 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,287 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,288 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,288 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,288 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T20:31:35,289 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T20:31:35,291 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:31:35,292 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769579, jitterRate=-0.021430477499961853}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T20:31:35,292 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731961895276Initializing all the Stores at 1731961895277 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961895277Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961895277Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961895277Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961895277Cleaning up temporary data from old regions at 1731961895288 (+11 ms)Region opened successfully at 1731961895292 (+4 ms) 2024-11-18T20:31:35,292 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T20:31:35,295 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@497b91ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:31:35,296 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T20:31:35,296 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T20:31:35,297 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T20:31:35,297 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T20:31:35,297 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T20:31:35,298 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T20:31:35,298 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T20:31:35,303 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T20:31:35,304 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T20:31:35,305 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T20:31:35,305 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T20:31:35,306 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T20:31:35,306 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T20:31:35,307 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T20:31:35,310 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T20:31:35,310 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T20:31:35,311 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T20:31:35,312 DEBUG 
[master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T20:31:35,315 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T20:31:35,315 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T20:31:35,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:31:35,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T20:31:35,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,317 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c0a89b2656d4,46075,1731961895147, sessionid=0x100548895490000, setting cluster-up flag (Was=false) 2024-11-18T20:31:35,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,327 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T20:31:35,328 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,46075,1731961895147 2024-11-18T20:31:35,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,333 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T20:31:35,334 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c0a89b2656d4,46075,1731961895147 2024-11-18T20:31:35,335 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T20:31:35,336 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T20:31:35,337 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T20:31:35,337 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T20:31:35,337 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c0a89b2656d4,46075,1731961895147 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T20:31:35,338 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:31:35,338 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:31:35,338 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:31:35,338 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=5, maxPoolSize=5 2024-11-18T20:31:35,338 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c0a89b2656d4:0, corePoolSize=10, maxPoolSize=10 2024-11-18T20:31:35,338 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,338 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:31:35,338 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c0a89b2656d4:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T20:31:35,346 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:31:35,347 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T20:31:35,348 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,348 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T20:31:35,351 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731961925350 2024-11-18T20:31:35,351 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T20:31:35,351 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T20:31:35,351 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T20:31:35,351 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T20:31:35,351 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T20:31:35,351 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T20:31:35,351 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,352 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T20:31:35,352 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T20:31:35,352 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T20:31:35,352 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T20:31:35,352 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T20:31:35,353 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961895352,5,FailOnTimeoutGroup] 2024-11-18T20:31:35,357 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961895353,5,FailOnTimeoutGroup] 2024-11-18T20:31:35,357 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,357 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T20:31:35,357 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,357 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
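Aside (illustrative only): the cleaner chores above (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) are all registered through ChoreService with a fixed period. A minimal sketch of that mechanism follows, assuming the internal ScheduledChore/ChoreService classes keep their usual signatures; the chore name, the 600000 ms period and the inline Stoppable are invented for the example.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) {
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            // Periodic task in the spirit of the cleaners scheduled in the log (period in ms).
            ScheduledChore chore = new ScheduledChore("example-chore", stopper, 600000) {
                @Override protected void chore() {
                    // Work performed every period; the real cleaners scan and delete eligible files here.
                }
            };
            ChoreService service = new ChoreService("example");
            service.scheduleChore(chore);
            // ... later, on shutdown: service.shutdown();
        }
    }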
2024-11-18T20:31:35,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:31:35,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741831_1007 (size=1321) 2024-11-18T20:31:35,363 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T20:31:35,364 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a 2024-11-18T20:31:35,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:31:35,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741832_1008 (size=32) 2024-11-18T20:31:35,377 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:31:35,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:31:35,380 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:31:35,380 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:31:35,381 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:31:35,382 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:31:35,382 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:31:35,383 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:31:35,384 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:31:35,384 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,384 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:31:35,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:31:35,386 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:31:35,386 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,386 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:31:35,386 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:31:35,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740 2024-11-18T20:31:35,387 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740 2024-11-18T20:31:35,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:31:35,389 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:31:35,389 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T20:31:35,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:31:35,392 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T20:31:35,392 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713176, jitterRate=-0.09315051138401031}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:31:35,392 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731961895377Initializing all the Stores at 1731961895378 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961895378Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961895378Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961895378Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961895378Cleaning up temporary data from old regions at 1731961895389 (+11 ms)Region opened successfully at 1731961895392 (+3 ms) 2024-11-18T20:31:35,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:31:35,393 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:31:35,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:31:35,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:31:35,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:31:35,393 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:31:35,393 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961895393Disabling compacts and flushes for region at 1731961895393Disabling writes for close at 1731961895393Writing region 
close event to WAL at 1731961895393Closed at 1731961895393 2024-11-18T20:31:35,394 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:31:35,394 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T20:31:35,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T20:31:35,395 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:31:35,396 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T20:31:35,403 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(746): ClusterId : 9d02266c-3627-48c3-ad38-e10e89867139 2024-11-18T20:31:35,403 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T20:31:35,404 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T20:31:35,404 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T20:31:35,406 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T20:31:35,406 DEBUG [RS:0;c0a89b2656d4:39421 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14a3a909, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c0a89b2656d4/172.17.0.2:0 2024-11-18T20:31:35,423 DEBUG [RS:0;c0a89b2656d4:39421 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c0a89b2656d4:39421 2024-11-18T20:31:35,423 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T20:31:35,423 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T20:31:35,423 DEBUG [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(832): About to register with Master. 
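The "Opened 1588230740" record above reports desiredMaxFileSize=713176 with jitterRate=-0.09315051138401031 (and, when the meta region is reopened later in this log, 740600 with jitterRate=-0.058279216289520264). Both figures are consistent with a base max file size of 786432 bytes (768 KB, presumably the hbase.hregion.max.filesize used by this test) plus a truncated size*jitterRate adjustment. The sketch below only checks that arithmetic; it is not the HBase split-policy implementation.

    // Sketch of the jittered desiredMaxFileSize arithmetic implied by the log, assuming a
    // 768 KB configured max file size; truncation toward zero via the (long) cast is what
    // reproduces both logged values exactly.
    public class SplitSizeJitterSketch {
      static long jittered(long baseMaxFileSize, double jitterRate) {
        return baseMaxFileSize + (long) (baseMaxFileSize * jitterRate);
      }
      public static void main(String[] args) {
        long base = 768L * 1024; // 786432 bytes, assumed hbase.hregion.max.filesize
        System.out.println(jittered(base, -0.09315051138401031));  // 713176
        System.out.println(jittered(base, -0.058279216289520264)); // 740600
      }
    }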
2024-11-18T20:31:35,424 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(2659): reportForDuty to master=c0a89b2656d4,46075,1731961895147 with port=39421, startcode=1731961895190 2024-11-18T20:31:35,424 DEBUG [RS:0;c0a89b2656d4:39421 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T20:31:35,426 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36073, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T20:31:35,426 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46075 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:35,426 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46075 {}] master.ServerManager(517): Registering regionserver=c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:35,428 DEBUG [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a 2024-11-18T20:31:35,428 DEBUG [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34531 2024-11-18T20:31:35,428 DEBUG [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T20:31:35,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:31:35,430 DEBUG [RS:0;c0a89b2656d4:39421 {}] zookeeper.ZKUtil(111): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:35,430 WARN [RS:0;c0a89b2656d4:39421 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T20:31:35,430 INFO [RS:0;c0a89b2656d4:39421 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:31:35,430 DEBUG [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:35,430 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c0a89b2656d4,39421,1731961895190] 2024-11-18T20:31:35,436 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T20:31:35,438 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T20:31:35,439 INFO [RS:0;c0a89b2656d4:39421 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T20:31:35,439 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
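The MemStoreFlusher record above shows globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. a 0.95 ratio, which matches the usual default for hbase.regionserver.global.memstore.size.lower.limit; 880 MB would in turn follow from the default 40% global memstore fraction on a roughly 2.2 GB test heap. A small sketch of that arithmetic, with the heap size and both ratios treated as assumptions:

    // Sketch of the global memstore limit arithmetic behind the MemStoreFlusher line,
    // assuming a ~2200 MB heap, a 0.40 global memstore fraction and a 0.95 lower-mark
    // ratio; only the two logged figures (880 M and 836 M) are certain.
    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long heapMb = 2200;                                   // assumed test JVM heap
        long globalLimitMb = Math.round(heapMb * 0.40);       // 880 M in the log
        long lowerMarkMb = Math.round(globalLimitMb * 0.95);  // 836 M in the log
        System.out.println(globalLimitMb + " M / " + lowerMarkMb + " M");
      }
    }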
2024-11-18T20:31:35,441 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T20:31:35,442 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T20:31:35,442 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,442 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,442 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,442 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,442 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,442 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,442 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c0a89b2656d4:0, corePoolSize=2, maxPoolSize=2 2024-11-18T20:31:35,442 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,443 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,443 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,443 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,443 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,443 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c0a89b2656d4:0, corePoolSize=1, maxPoolSize=1 2024-11-18T20:31:35,443 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:31:35,443 DEBUG [RS:0;c0a89b2656d4:39421 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c0a89b2656d4:0, corePoolSize=3, maxPoolSize=3 2024-11-18T20:31:35,449 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
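The ExecutorService records above enumerate the region server's handler pools together with their core/max sizes (for example RS_OPEN_REGION at 1/1, RS_LOG_REPLAY_OPS at 2/2, RS_SNAPSHOT_OPERATIONS at 3/3). The sketch below shows how fixed-size pools with those logged sizes could be built from plain java.util.concurrent; it illustrates the configuration only and is not the HBase ExecutorService class itself.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // Illustration only: fixed-size pools mirroring the core/max sizes logged for a few
    // of the region server executors; the names and sizes are taken from the log above.
    public class RegionServerPoolsSketch {
      public static void main(String[] args) {
        Map<String, Integer> poolSizes = new LinkedHashMap<>();
        poolSizes.put("RS_OPEN_REGION", 1);
        poolSizes.put("RS_OPEN_META", 1);
        poolSizes.put("RS_LOG_REPLAY_OPS", 2);
        poolSizes.put("RS_SNAPSHOT_OPERATIONS", 3);
        poolSizes.put("RS_FLUSH_OPERATIONS", 3);
        Map<String, ExecutorService> pools = new LinkedHashMap<>();
        poolSizes.forEach((name, size) -> pools.put(name, Executors.newFixedThreadPool(size)));
        pools.forEach((name, pool) -> System.out.println(name + " -> " + pool));
        pools.values().forEach(ExecutorService::shutdown);
      }
    }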
2024-11-18T20:31:35,449 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,449 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,449 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,449 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,449 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,39421,1731961895190-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:31:35,468 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T20:31:35,468 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,39421,1731961895190-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,468 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,468 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.Replication(171): c0a89b2656d4,39421,1731961895190 started 2024-11-18T20:31:35,484 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:35,484 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(1482): Serving as c0a89b2656d4,39421,1731961895190, RpcServer on c0a89b2656d4/172.17.0.2:39421, sessionid=0x100548895490001 2024-11-18T20:31:35,484 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T20:31:35,484 DEBUG [RS:0;c0a89b2656d4:39421 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:35,484 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,39421,1731961895190' 2024-11-18T20:31:35,484 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T20:31:35,485 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T20:31:35,485 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T20:31:35,485 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T20:31:35,485 DEBUG [RS:0;c0a89b2656d4:39421 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:35,485 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c0a89b2656d4,39421,1731961895190' 2024-11-18T20:31:35,485 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T20:31:35,486 DEBUG 
[RS:0;c0a89b2656d4:39421 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T20:31:35,486 DEBUG [RS:0;c0a89b2656d4:39421 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T20:31:35,486 INFO [RS:0;c0a89b2656d4:39421 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T20:31:35,486 INFO [RS:0;c0a89b2656d4:39421 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T20:31:35,546 WARN [c0a89b2656d4:46075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T20:31:35,588 INFO [RS:0;c0a89b2656d4:39421 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C39421%2C1731961895190, suffix=, logDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/c0a89b2656d4,39421,1731961895190, archiveDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/oldWALs, maxLogs=32 2024-11-18T20:31:35,589 INFO [RS:0;c0a89b2656d4:39421 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C39421%2C1731961895190.1731961895588 2024-11-18T20:31:35,606 INFO [RS:0;c0a89b2656d4:39421 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/c0a89b2656d4,39421,1731961895190/c0a89b2656d4%2C39421%2C1731961895190.1731961895588 2024-11-18T20:31:35,615 DEBUG [RS:0;c0a89b2656d4:39421 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39933:39933),(127.0.0.1/127.0.0.1:35031:35031)] 2024-11-18T20:31:35,797 DEBUG [c0a89b2656d4:46075 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T20:31:35,797 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:35,798 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,39421,1731961895190, state=OPENING 2024-11-18T20:31:35,799 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T20:31:35,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:35,801 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:31:35,801 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T20:31:35,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=c0a89b2656d4,39421,1731961895190}] 2024-11-18T20:31:35,802 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:31:35,954 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T20:31:35,955 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45431, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T20:31:35,959 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T20:31:35,959 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:31:35,961 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c0a89b2656d4%2C39421%2C1731961895190.meta, suffix=.meta, logDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/c0a89b2656d4,39421,1731961895190, archiveDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/oldWALs, maxLogs=32 2024-11-18T20:31:35,961 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c0a89b2656d4%2C39421%2C1731961895190.meta.1731961895961.meta 2024-11-18T20:31:35,966 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/c0a89b2656d4,39421,1731961895190/c0a89b2656d4%2C39421%2C1731961895190.meta.1731961895961.meta 2024-11-18T20:31:35,973 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35031:35031),(127.0.0.1/127.0.0.1:39933:39933)] 2024-11-18T20:31:35,977 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T20:31:35,978 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T20:31:35,978 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T20:31:35,978 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
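The meta WAL created above is named c0a89b2656d4%2C39421%2C1731961895190.meta.1731961895961.meta: the configured prefix (the URL-encoded server name, with commas becoming %2C, plus the ".meta" marker), the WAL start timestamp, and the ".meta" suffix from the WAL configuration line. The sketch below rebuilds that name from its visible parts; the prefix + "." + timestamp + suffix pattern is read off this log, not taken from the WAL provider source.

    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    // Reconstructs the meta WAL file name seen above from its visible parts: URL-encoded
    // server name, ".meta" prefix marker, start timestamp and ".meta" suffix.
    public class WalNameSketch {
      public static void main(String[] args) {
        String serverName = "c0a89b2656d4,39421,1731961895190";
        String prefix = URLEncoder.encode(serverName, StandardCharsets.UTF_8) + ".meta";
        long startTimestamp = 1731961895961L;
        String suffix = ".meta";
        // Prints c0a89b2656d4%2C39421%2C1731961895190.meta.1731961895961.meta
        System.out.println(prefix + "." + startTimestamp + suffix);
      }
    }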
2024-11-18T20:31:35,978 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T20:31:35,978 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T20:31:35,978 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T20:31:35,978 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T20:31:35,980 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T20:31:35,981 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T20:31:35,981 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:31:35,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T20:31:35,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T20:31:35,982 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:31:35,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T20:31:35,983 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T20:31:35,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T20:31:35,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T20:31:35,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T20:31:35,984 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T20:31:35,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
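The CompactionConfiguration records above repeat the same tuning for every hbase:meta family: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak) and a throttle point of 2684354560 bytes (2.5 GiB). The sketch below is a deliberately simplified version of the ratio check those parameters drive, where, walking from the oldest store file, a file is skipped while it is larger than both minCompactSize and ratio times the total size of the newer files; the real ExploringCompactionPolicy evaluates many candidate sets and applies further rules.

    // Simplified sketch of the ratio check behind the logged parameters (not the full
    // ExploringCompactionPolicy): advance past old files that are too large relative to
    // the newer ones; everything from the first eligible index on is a candidate.
    public class RatioSelectionSketch {
      static int firstEligible(long[] sizesOldestFirst, long minCompactSize, double ratio) {
        int n = sizesOldestFirst.length;
        long[] newerTotal = new long[n]; // sum of the sizes of files newer than index i
        for (int i = n - 2; i >= 0; i--) {
          newerTotal[i] = newerTotal[i + 1] + sizesOldestFirst[i + 1];
        }
        int start = 0;
        while (start < n
            && sizesOldestFirst[start] > Math.max(minCompactSize, (long) (newerTotal[start] * ratio))) {
          start++;
        }
        return start;
      }
      public static void main(String[] args) {
        long mb = 1024L * 1024;
        long[] files = { 900 * mb, 120 * mb, 60 * mb, 20 * mb, 10 * mb }; // hypothetical sizes
        // With ratio 1.2 and minCompactSize 128 MB as logged, only the 900 MB file is skipped.
        System.out.println("first eligible index = " + firstEligible(files, 128 * mb, 1.2));
      }
    }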
2024-11-18T20:31:35,985 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T20:31:35,985 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740 2024-11-18T20:31:35,986 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740 2024-11-18T20:31:35,987 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T20:31:35,987 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T20:31:35,988 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T20:31:35,989 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T20:31:35,989 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740600, jitterRate=-0.058279216289520264}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T20:31:35,989 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T20:31:35,990 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731961895978Writing region info on filesystem at 1731961895978Initializing all the Stores at 1731961895979 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961895979Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961895980 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731961895980Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731961895980Cleaning up temporary data from old regions at 1731961895987 (+7 ms)Running coprocessor post-open hooks at 1731961895989 (+2 ms)Region opened successfully at 1731961895990 (+1 ms) 2024-11-18T20:31:35,991 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731961895953 2024-11-18T20:31:35,993 DEBUG [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T20:31:35,993 INFO [RS_OPEN_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T20:31:35,993 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:35,994 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c0a89b2656d4,39421,1731961895190, state=OPEN 2024-11-18T20:31:35,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,40455,1731961704988/c0a89b2656d4%2C40455%2C1731961704988.meta.1731961705793.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T20:31:35,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39387/user/jenkins/test-data/c526d080-8a13-4d0e-85f8-7a450d4996f8/WALs/c0a89b2656d4,42387,1731961705932/c0a89b2656d4%2C42387%2C1731961705932.1731961706120 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T20:31:35,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:31:35,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T20:31:35,996 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:35,997 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:31:35,997 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T20:31:35,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T20:31:35,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c0a89b2656d4,39421,1731961895190 in 196 msec 2024-11-18T20:31:36,000 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T20:31:36,000 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-18T20:31:36,001 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T20:31:36,001 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T20:31:36,002 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:31:36,002 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,39421,1731961895190, seqNum=-1] 2024-11-18T20:31:36,002 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:31:36,003 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34971, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:31:36,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 672 msec 2024-11-18T20:31:36,008 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731961896008, completionTime=-1 2024-11-18T20:31:36,008 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T20:31:36,008 DEBUG 
[master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731961956010 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731962016010 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,46075,1731961895147-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,46075,1731961895147-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,46075,1731961895147-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c0a89b2656d4:46075, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:36,010 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:36,012 DEBUG [master/c0a89b2656d4:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T20:31:36,013 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.795sec 2024-11-18T20:31:36,014 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T20:31:36,014 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T20:31:36,014 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T20:31:36,014 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
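Once initialization completes, the master enables its periodic chores with the periods logged above (ClusterStatusChore every 60000 ms, BalancerChore and RegionNormalizerChore every 300000 ms, HbckChore every 3600000 ms, and so on). The sketch below shows the same fixed-rate scheduling pattern with a plain ScheduledExecutorService; it mirrors only the logged periods, and the chore bodies are placeholders rather than the real ChoreService tasks.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Illustration of fixed-period background chores using periods printed in the log;
    // the runnables here are stand-ins, not the HBase chore implementations.
    public class ChoreScheduleSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
        chorePool.scheduleAtFixedRate(() -> System.out.println("ClusterStatusChore tick"),
            0, 60_000, TimeUnit.MILLISECONDS);
        chorePool.scheduleAtFixedRate(() -> System.out.println("BalancerChore tick"),
            0, 300_000, TimeUnit.MILLISECONDS);
        chorePool.scheduleAtFixedRate(() -> System.out.println("HbckChore tick"),
            0, 3_600_000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(1); // let the initial ticks run, then stop
        chorePool.shutdownNow();
      }
    }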
2024-11-18T20:31:36,014 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T20:31:36,014 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,46075,1731961895147-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T20:31:36,014 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,46075,1731961895147-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T20:31:36,016 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T20:31:36,016 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T20:31:36,016 INFO [master/c0a89b2656d4:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c0a89b2656d4,46075,1731961895147-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T20:31:36,104 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@307592bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:31:36,104 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c0a89b2656d4,46075,-1 for getting cluster id 2024-11-18T20:31:36,105 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T20:31:36,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-18T20:31:36,108 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9d02266c-3627-48c3-ad38-e10e89867139' 2024-11-18T20:31:36,108 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T20:31:36,109 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9d02266c-3627-48c3-ad38-e10e89867139" 2024-11-18T20:31:36,109 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@adb1068, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:31:36,109 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c0a89b2656d4,46075,-1] 2024-11-18T20:31:36,110 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T20:31:36,110 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:36,111 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40142, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T20:31:36,113 
DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62dd2bce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T20:31:36,113 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T20:31:36,114 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c0a89b2656d4,39421,1731961895190, seqNum=-1] 2024-11-18T20:31:36,114 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T20:31:36,115 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57122, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T20:31:36,117 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c0a89b2656d4,46075,1731961895147 2024-11-18T20:31:36,117 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T20:31:36,119 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T20:31:36,119 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T20:31:36,121 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/test.com,8080,1, archiveDir=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/oldWALs, maxLogs=32 2024-11-18T20:31:36,122 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731961896122 2024-11-18T20:31:36,127 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/test.com,8080,1/test.com%2C8080%2C1.1731961896122 2024-11-18T20:31:36,127 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39933:39933),(127.0.0.1/127.0.0.1:35031:35031)] 2024-11-18T20:31:36,128 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731961896128 2024-11-18T20:31:36,133 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,134 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,134 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,134 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,134 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/test.com,8080,1/test.com%2C8080%2C1.1731961896122 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/test.com,8080,1/test.com%2C8080%2C1.1731961896128 2024-11-18T20:31:36,136 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741835_1011 (size=93) 2024-11-18T20:31:36,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741835_1011 (size=93) 2024-11-18T20:31:36,138 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35031:35031),(127.0.0.1/127.0.0.1:39933:39933)] 2024-11-18T20:31:36,139 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/WALs/test.com,8080,1/test.com%2C8080%2C1.1731961896122 to hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/oldWALs/test.com%2C8080%2C1.1731961896122 2024-11-18T20:31:36,139 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,139 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,139 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,139 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,139 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741836_1012 (size=93) 2024-11-18T20:31:36,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741836_1012 (size=93) 2024-11-18T20:31:36,142 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/oldWALs 2024-11-18T20:31:36,142 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731961896128) 2024-11-18T20:31:36,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T20:31:36,143 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
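The roll logged just above replaces test.com%2C8080%2C1.1731961896122 (85 bytes, 0 entries) with ...1731961896128, after which the WAL-Archive thread moves the superseded file into oldWALs; the log suggests the immediate archive is possible because the rolled-out WAL carries no live entries. A sketch of that archive-on-roll move using java.nio.file, with local temporary directories standing in for the hdfs://localhost:34531/... WALs and oldWALs paths:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    // Sketch of the archive-on-roll step seen above: a rolled-out WAL with no live entries
    // is moved from the WALs directory into oldWALs. Local paths stand in for HDFS.
    public class WalArchiveSketch {
      public static void main(String[] args) throws IOException {
        Path walDir = Files.createTempDirectory("WALs");
        Path oldWalDir = Files.createTempDirectory("oldWALs");
        Path rolledOut = Files.createFile(walDir.resolve("test.com%2C8080%2C1.1731961896122"));
        // The rolled-out file carried 0 entries, so it can be archived right away.
        Path archived = Files.move(rolledOut, oldWalDir.resolve(rolledOut.getFileName()));
        System.out.println("Archived to " + archived);
      }
    }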
2024-11-18T20:31:36,143 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:31:36,143 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:36,143 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:36,143 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-18T20:31:36,143 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T20:31:36,143 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1668418542, stopped=false 2024-11-18T20:31:36,143 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c0a89b2656d4,46075,1731961895147 2024-11-18T20:31:36,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:31:36,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T20:31:36,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:36,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:36,144 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:31:36,144 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T20:31:36,144 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:31:36,144 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:36,144 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:31:36,144 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c0a89b2656d4,39421,1731961895190' ***** 2024-11-18T20:31:36,144 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T20:31:36,145 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T20:31:36,145 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(959): stopping server c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c0a89b2656d4:39421. 
2024-11-18T20:31:36,145 DEBUG [RS:0;c0a89b2656d4:39421 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T20:31:36,145 DEBUG [RS:0;c0a89b2656d4:39421 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T20:31:36,145 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T20:31:36,145 DEBUG [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T20:31:36,145 DEBUG [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T20:31:36,146 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T20:31:36,146 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T20:31:36,146 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T20:31:36,146 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T20:31:36,146 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T20:31:36,146 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T20:31:36,162 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740/.tmp/ns/f369d558cf8a44af8eeed5e55cbb7821 is 43, key is default/ns:d/1731961896004/Put/seqid=0 2024-11-18T20:31:36,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741837_1013 (size=5153) 2024-11-18T20:31:36,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741837_1013 (size=5153) 2024-11-18T20:31:36,166 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740/.tmp/ns/f369d558cf8a44af8eeed5e55cbb7821 2024-11-18T20:31:36,170 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740/.tmp/ns/f369d558cf8a44af8eeed5e55cbb7821 as hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740/ns/f369d558cf8a44af8eeed5e55cbb7821 2024-11-18T20:31:36,174 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740/ns/f369d558cf8a44af8eeed5e55cbb7821, entries=2, sequenceid=6, filesize=5.0 K 2024-11-18T20:31:36,175 INFO 
[RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-11-18T20:31:36,179 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-18T20:31:36,179 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T20:31:36,180 INFO [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T20:31:36,180 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731961896146Running coprocessor pre-close hooks at 1731961896146Disabling compacts and flushes for region at 1731961896146Disabling writes for close at 1731961896146Obtaining lock to block concurrent updates at 1731961896146Preparing flush snapshotting stores in 1588230740 at 1731961896146Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731961896146Flushing stores of hbase:meta,,1.1588230740 at 1731961896147 (+1 ms)Flushing 1588230740/ns: creating writer at 1731961896147Flushing 1588230740/ns: appending metadata at 1731961896161 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731961896161Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cebc193: reopening flushed file at 1731961896170 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1731961896175 (+5 ms)Writing region close event to WAL at 1731961896176 (+1 ms)Running coprocessor post-close hooks at 1731961896179 (+3 ms)Closed at 1731961896179 2024-11-18T20:31:36,180 DEBUG [RS_CLOSE_META-regionserver/c0a89b2656d4:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T20:31:36,346 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(976): stopping server c0a89b2656d4,39421,1731961895190; all regions closed. 
2024-11-18T20:31:36,346 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,347 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,347 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,347 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,347 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741834_1010 (size=1152) 2024-11-18T20:31:36,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741834_1010 (size=1152) 2024-11-18T20:31:36,354 DEBUG [RS:0;c0a89b2656d4:39421 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/oldWALs 2024-11-18T20:31:36,354 INFO [RS:0;c0a89b2656d4:39421 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C39421%2C1731961895190.meta:.meta(num 1731961895961) 2024-11-18T20:31:36,355 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,355 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,355 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,356 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,356 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741833_1009 (size=93) 2024-11-18T20:31:36,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741833_1009 (size=93) 2024-11-18T20:31:36,362 DEBUG [RS:0;c0a89b2656d4:39421 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/oldWALs 2024-11-18T20:31:36,362 INFO [RS:0;c0a89b2656d4:39421 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c0a89b2656d4%2C39421%2C1731961895190:(num 1731961895588) 2024-11-18T20:31:36,362 DEBUG [RS:0;c0a89b2656d4:39421 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T20:31:36,362 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T20:31:36,362 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:31:36,363 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.ChoreService(370): Chore service for: regionserver/c0a89b2656d4:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T20:31:36,363 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:31:36,363 INFO [regionserver/c0a89b2656d4:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
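The entries above record the hbase:meta memstore being flushed to an HFile and the region server's WALs being closed and archived to oldWALs during shutdown. As a rough sketch of how the same operations can be requested from client code (the connection setup and the loop over region servers are assumptions; flush and rollWALWriter are standard Admin calls):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndRollSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Flush hbase:meta so its memstore is persisted as an HFile, comparable to the
      // "Flushing 1588230740" / DefaultStoreFlusher messages above.
      admin.flush(TableName.META_TABLE_NAME);
      // Ask each region server to roll its write-ahead log; fully rolled logs are
      // eventually moved to the oldWALs directory, as logged above for one server.
      for (ServerName server : admin.getRegionServers()) {
        admin.rollWALWriter(server);
      }
    }
  }
}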
2024-11-18T20:31:36,363 INFO [RS:0;c0a89b2656d4:39421 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39421 2024-11-18T20:31:36,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T20:31:36,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c0a89b2656d4,39421,1731961895190 2024-11-18T20:31:36,365 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:31:36,366 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c0a89b2656d4,39421,1731961895190] 2024-11-18T20:31:36,367 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c0a89b2656d4,39421,1731961895190 already deleted, retry=false 2024-11-18T20:31:36,367 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c0a89b2656d4,39421,1731961895190 expired; onlineServers=0 2024-11-18T20:31:36,367 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c0a89b2656d4,46075,1731961895147' ***** 2024-11-18T20:31:36,367 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T20:31:36,367 INFO [M:0;c0a89b2656d4:46075 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T20:31:36,367 INFO [M:0;c0a89b2656d4:46075 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T20:31:36,367 DEBUG [M:0;c0a89b2656d4:46075 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T20:31:36,367 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
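The NodeDeleted event for /hbase/rs/c0a89b2656d4,39421,1731961895190 above is how the master's RegionServerTracker learns that the region server's ephemeral znode is gone and treats the server as expired. Purely as an illustration of the underlying watch mechanism, here is a standalone sketch using the plain ZooKeeper client; the quorum address and znode path are copied from the log, everything else is assumed.

import java.io.IOException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws IOException, KeeperException, InterruptedException {
    // Print a message when the watched znode disappears.
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("znode deleted: " + event.getPath());
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60185", 30_000, watcher);
    // exists() registers the default watcher; it fires once for the next create,
    // delete, or data change on this path.
    zk.exists("/hbase/rs/c0a89b2656d4,39421,1731961895190", true);
    Thread.sleep(60_000); // demo only: keep the process alive long enough to see an event
    zk.close();
  }
}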
2024-11-18T20:31:36,367 DEBUG [M:0;c0a89b2656d4:46075 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T20:31:36,367 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961895352 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.large.0-1731961895352,5,FailOnTimeoutGroup] 2024-11-18T20:31:36,367 DEBUG [master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961895353 {}] cleaner.HFileCleaner(306): Exit Thread[master/c0a89b2656d4:0:becomeActiveMaster-HFileCleaner.small.0-1731961895353,5,FailOnTimeoutGroup] 2024-11-18T20:31:36,367 INFO [M:0;c0a89b2656d4:46075 {}] hbase.ChoreService(370): Chore service for: master/c0a89b2656d4:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T20:31:36,367 INFO [M:0;c0a89b2656d4:46075 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T20:31:36,367 DEBUG [M:0;c0a89b2656d4:46075 {}] master.HMaster(1795): Stopping service threads 2024-11-18T20:31:36,367 INFO [M:0;c0a89b2656d4:46075 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T20:31:36,368 INFO [M:0;c0a89b2656d4:46075 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T20:31:36,368 INFO [M:0;c0a89b2656d4:46075 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T20:31:36,368 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T20:31:36,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T20:31:36,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T20:31:36,368 DEBUG [M:0;c0a89b2656d4:46075 {}] zookeeper.ZKUtil(347): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T20:31:36,368 WARN [M:0;c0a89b2656d4:46075 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T20:31:36,369 INFO [M:0;c0a89b2656d4:46075 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/.lastflushedseqids 2024-11-18T20:31:36,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741838_1014 (size=99) 2024-11-18T20:31:36,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741838_1014 (size=99) 2024-11-18T20:31:36,375 INFO [M:0;c0a89b2656d4:46075 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T20:31:36,375 INFO [M:0;c0a89b2656d4:46075 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T20:31:36,375 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T20:31:36,375 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:36,375 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:36,375 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T20:31:36,375 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:36,375 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T20:31:36,389 DEBUG [M:0;c0a89b2656d4:46075 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3a8c644aaba347aca0be3805df245f58 is 82, key is hbase:meta,,1/info:regioninfo/1731961895993/Put/seqid=0 2024-11-18T20:31:36,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741839_1015 (size=5672) 2024-11-18T20:31:36,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741839_1015 (size=5672) 2024-11-18T20:31:36,394 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3a8c644aaba347aca0be3805df245f58 2024-11-18T20:31:36,413 DEBUG [M:0;c0a89b2656d4:46075 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7c481402b2ac4e95a08e15dd1bce8a34 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731961896008/Put/seqid=0 2024-11-18T20:31:36,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741840_1016 (size=5275) 2024-11-18T20:31:36,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741840_1016 (size=5275) 2024-11-18T20:31:36,417 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7c481402b2ac4e95a08e15dd1bce8a34 2024-11-18T20:31:36,435 DEBUG [M:0;c0a89b2656d4:46075 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b16a3c69e4f349baaf416e88eccb5aa4 is 69, key is c0a89b2656d4,39421,1731961895190/rs:state/1731961895427/Put/seqid=0 2024-11-18T20:31:36,439 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741841_1017 (size=5156) 2024-11-18T20:31:36,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741841_1017 (size=5156) 2024-11-18T20:31:36,439 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b16a3c69e4f349baaf416e88eccb5aa4 2024-11-18T20:31:36,455 DEBUG [M:0;c0a89b2656d4:46075 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2906d9bd31dd47658920e15f08d9b0e5 is 52, key is load_balancer_on/state:d/1731961896118/Put/seqid=0 2024-11-18T20:31:36,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is added to blk_1073741842_1018 (size=5056) 2024-11-18T20:31:36,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741842_1018 (size=5056) 2024-11-18T20:31:36,460 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2906d9bd31dd47658920e15f08d9b0e5 2024-11-18T20:31:36,465 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3a8c644aaba347aca0be3805df245f58 as hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3a8c644aaba347aca0be3805df245f58 2024-11-18T20:31:36,466 INFO [RS:0;c0a89b2656d4:39421 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:31:36,466 INFO [RS:0;c0a89b2656d4:39421 {}] regionserver.HRegionServer(1031): Exiting; stopping=c0a89b2656d4,39421,1731961895190; zookeeper connection closed. 
2024-11-18T20:31:36,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:31:36,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39421-0x100548895490001, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:31:36,466 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2090d3c0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2090d3c0 2024-11-18T20:31:36,466 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T20:31:36,469 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3a8c644aaba347aca0be3805df245f58, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T20:31:36,470 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7c481402b2ac4e95a08e15dd1bce8a34 as hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7c481402b2ac4e95a08e15dd1bce8a34 2024-11-18T20:31:36,475 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7c481402b2ac4e95a08e15dd1bce8a34, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T20:31:36,476 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b16a3c69e4f349baaf416e88eccb5aa4 as hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b16a3c69e4f349baaf416e88eccb5aa4 2024-11-18T20:31:36,480 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b16a3c69e4f349baaf416e88eccb5aa4, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T20:31:36,481 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2906d9bd31dd47658920e15f08d9b0e5 as hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2906d9bd31dd47658920e15f08d9b0e5 2024-11-18T20:31:36,484 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34531/user/jenkins/test-data/5c843400-e91e-58dd-eb91-ce94ed27f86a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2906d9bd31dd47658920e15f08d9b0e5, 
entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T20:31:36,485 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false 2024-11-18T20:31:36,486 INFO [M:0;c0a89b2656d4:46075 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T20:31:36,486 DEBUG [M:0;c0a89b2656d4:46075 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731961896375Disabling compacts and flushes for region at 1731961896375Disabling writes for close at 1731961896375Obtaining lock to block concurrent updates at 1731961896375Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731961896375Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731961896376 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731961896376Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731961896376Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731961896389 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731961896389Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731961896397 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731961896412 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731961896412Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731961896420 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731961896434 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731961896434Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731961896443 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731961896455 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731961896455Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69202be2: reopening flushed file at 1731961896464 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@700a4eb4: reopening flushed file at 1731961896470 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23d1eac3: reopening flushed file at 1731961896475 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30f3231: reopening flushed file at 1731961896480 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=29, compaction requested=false at 1731961896485 (+5 ms)Writing region close event to WAL at 1731961896486 (+1 ms)Closed at 1731961896486 2024-11-18T20:31:36,487 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,487 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,487 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,487 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,487 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T20:31:36,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40527 is 
added to blk_1073741830_1006 (size=10311) 2024-11-18T20:31:36,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43149 is added to blk_1073741830_1006 (size=10311) 2024-11-18T20:31:36,489 INFO [M:0;c0a89b2656d4:46075 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T20:31:36,489 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T20:31:36,489 INFO [M:0;c0a89b2656d4:46075 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46075 2024-11-18T20:31:36,489 INFO [M:0;c0a89b2656d4:46075 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T20:31:36,590 INFO [M:0;c0a89b2656d4:46075 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T20:31:36,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:31:36,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46075-0x100548895490000, quorum=127.0.0.1:60185, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T20:31:36,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16a48457{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:31:36,595 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50a67c65{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:31:36,595 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:31:36,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bd30c09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:31:36,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a25273e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/hadoop.log.dir/,STOPPED} 2024-11-18T20:31:36,597 WARN [BP-1285727592-172.17.0.2-1731961894593 heartbeating to localhost/127.0.0.1:34531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:31:36,597 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:31:36,597 WARN [BP-1285727592-172.17.0.2-1731961894593 heartbeating to localhost/127.0.0.1:34531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1285727592-172.17.0.2-1731961894593 (Datanode Uuid e9f2a205-3fd9-4e3d-86a9-284686877b7a) service to localhost/127.0.0.1:34531 2024-11-18T20:31:36,597 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:31:36,598 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/data/data3/current/BP-1285727592-172.17.0.2-1731961894593 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:31:36,599 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/data/data4/current/BP-1285727592-172.17.0.2-1731961894593 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:31:36,599 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:31:36,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39034b65{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T20:31:36,602 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d7fa30b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:31:36,602 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:31:36,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11e682be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:31:36,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37916527{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/hadoop.log.dir/,STOPPED} 2024-11-18T20:31:36,603 WARN [BP-1285727592-172.17.0.2-1731961894593 heartbeating to localhost/127.0.0.1:34531 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T20:31:36,603 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T20:31:36,603 WARN [BP-1285727592-172.17.0.2-1731961894593 heartbeating to localhost/127.0.0.1:34531 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1285727592-172.17.0.2-1731961894593 (Datanode Uuid 0d17b0d0-f33b-41ab-bab8-64bc45432e9e) service to localhost/127.0.0.1:34531 2024-11-18T20:31:36,603 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T20:31:36,604 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/data/data1/current/BP-1285727592-172.17.0.2-1731961894593 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:31:36,604 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/cluster_2901690f-a892-5307-c457-7ad7f82b8467/data/data2/current/BP-1285727592-172.17.0.2-1731961894593 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T20:31:36,604 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T20:31:36,609 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b9e214c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T20:31:36,610 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ff58c0c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T20:31:36,610 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T20:31:36,610 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c53e0a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T20:31:36,610 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1141999a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c6417beb-44b2-1b60-77b4-0194caba54f9/hadoop.log.dir/,STOPPED} 2024-11-18T20:31:36,616 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T20:31:36,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T20:31:36,638 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 229) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:34531 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34531 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34531 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34531 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34531 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34531 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:34531 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34531 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=534 (was 503) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=85 (was 85), ProcessCount=11 (was 11), AvailableMemoryMB=2242 (was 2258)
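The ResourceChecker summary above compares resources before and after the test (Thread=267 was 229, OpenFileDescriptor=534 was 503) and lists each potentially hanging thread with its stack. When investigating such a report outside the HBase test framework, the surviving threads can be enumerated with plain Java; a small self-contained sketch:

import java.util.Map;

public class ThreadDumpSketch {
  public static void main(String[] args) {
    // Enumerate all live threads with their current stack traces, similar in spirit
    // to the "Potentially hanging thread" listing produced by ResourceChecker.
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    for (Map.Entry<Thread, StackTraceElement[]> entry : stacks.entrySet()) {
      Thread t = entry.getKey();
      System.out.printf("Thread %s (daemon=%s, state=%s)%n",
          t.getName(), t.isDaemon(), t.getState());
      for (StackTraceElement frame : entry.getValue()) {
        System.out.println("    at " + frame);
      }
    }
  }
}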